xref: /linux/drivers/gpu/drm/i915/gt/intel_gt_irq.c (revision 0be3ff0c)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_uncore.h"
#include "intel_rps.h"
#include "pxp/intel_pxp_irq.h"

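/*
 * Forward a GuC interrupt to the GuC event handler. Only the GuC-to-host
 * event bit is acted on here; any other bits in the shared IIR are ignored.
 */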
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

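/*
 * Select the given bank/bit in the IIR selector register and spin until the
 * hardware latches a valid identity value, then acknowledge it. Returns the
 * raw identity dword, or 0 if the hardware never flagged the data as valid.
 */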
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

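/*
 * Dispatch non-engine ("other" class) interrupts: the GuC, GT PM (RPS) and
 * KCR (PXP crypto) instances each have their own handler.
 */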
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	if (instance == OTHER_KCR_INSTANCE)
		return intel_pxp_irq_handler(&gt->pxp, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

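/*
 * Route an engine interrupt to the intel_engine_cs looked up by class and
 * instance; warn once if no such engine has been registered.
 */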
static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return intel_engine_cs_irq(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

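/*
 * Decode an identity dword into class/instance/interrupt fields and hand it
 * to the engine or "other" class handler as appropriate.
 */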
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS || class == COMPUTE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

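/*
 * Service one GT interrupt bank: read the intr dword, resolve and handle the
 * identity of every set bit, then clear the dword to re-arm the bank.
 */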
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* The clear must come after the shared IIRs have been serviced. */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

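/*
 * Top-level handler for the banked GT interrupts introduced with gen11:
 * walk both interrupt banks flagged in the master control register under
 * the GT irq lock.
 */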
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}

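/*
 * Check whether a single IIR bit is pending and, if so, service and clear
 * it. Returns true if the bit was set.
 */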
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

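/*
 * Quiesce all gen11+ GT interrupt sources: zero every enable register and
 * set every mask register, covering the engine classes as well as the PM,
 * GuC and crypto blocks.
 */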
void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);

	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK,   ~0);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK,   ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);

	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK,  ~0);
}

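/*
 * Enable and unmask the engine interrupts wanted at init time. When GuC
 * submission is not in use, the CPU additionally needs the context-switch,
 * semaphore and CS error interrupts.
 */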
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_RENDER_USER_INTERRUPT;
	u32 dmask;
	u32 smask;

	if (!intel_uc_wants_guc_submission(&gt->uc))
		irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
			GT_CONTEXT_SWITCH_INTERRUPT |
			GT_WAIT_SEMAPHORE_INTERRUPT;

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
}

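/* Gen5 (Ironlake): only render and BSD user interrupts to demux. */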
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir);
}

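/*
 * On an L3 parity error, mask further parity interrupts, note which
 * slice(s) reported the error and kick the deferred error work.
 */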
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

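/*
 * Gen6/7: demux render, BSD and blitter user interrupts (the BSD and
 * blitter bits sit at shifts 12 and 22 within GTIIR), and report CS and L3
 * parity errors.
 */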
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir >> 12);

	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
				    gt_iir >> 22);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

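/*
 * Gen8 demux: each GTIIR register is shared by two sources (an engine pair,
 * or PM and GuC). Read the IIR, shift out each source's bits, then write
 * the value back to clear it.
 */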
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
					    iir >> GEN8_RCS_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
					    iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
					    iir >> GEN8_VCS0_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
					    iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
					    iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

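/*
 * Program the per-bank enable/mask pairs; bank 2 (PM/GuC) starts fully
 * masked and is managed on demand.
 */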
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

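/*
 * Update GTIMR under the irq lock: interrupt_mask selects which bits to
 * touch, enabled_irq_mask says which of those to unmask.
 */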
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (GRAPHICS_VER(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

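/*
 * Gen5-7 init: unmask and enable the user (and, where present, L3 parity)
 * interrupts in GTIMR/GTIER; on gen6+ also program the PM registers with
 * everything masked until RPS enables what it needs.
 */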
void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (GRAPHICS_VER(gt->i915) == 5)
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (GRAPHICS_VER(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}