xref: /openbsd/sys/dev/pci/drm/i915/gt/intel_reset.c (revision ddf58b8f)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2008-2018 Intel Corporation
4  */
5 
6 #include <linux/sched/mm.h>
7 #include <linux/stop_machine.h>
8 #include <linux/string_helpers.h>
9 
10 #include "display/intel_display_reset.h"
11 #include "display/intel_overlay.h"
12 
13 #include "gem/i915_gem_context.h"
14 
15 #include "gt/intel_gt_regs.h"
16 
17 #include "gt/uc/intel_gsc_fw.h"
18 
19 #include "i915_drv.h"
20 #include "i915_file_private.h"
21 #include "i915_gpu_error.h"
22 #include "i915_irq.h"
23 #include "i915_reg.h"
24 #include "intel_breadcrumbs.h"
25 #include "intel_engine_pm.h"
26 #include "intel_engine_regs.h"
27 #include "intel_gt.h"
28 #include "intel_gt_pm.h"
29 #include "intel_gt_requests.h"
30 #include "intel_mchbar_regs.h"
31 #include "intel_pci_config.h"
32 #include "intel_reset.h"
33 
34 #include "uc/intel_guc.h"
35 
36 #define RESET_MAX_RETRIES 3
37 
38 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
39 {
40 	struct drm_i915_file_private *file_priv = ctx->file_priv;
41 	unsigned long prev_hang;
42 	unsigned int score;
43 
44 	if (IS_ERR_OR_NULL(file_priv))
45 		return;
46 
47 	score = 0;
48 	if (banned)
49 		score = I915_CLIENT_SCORE_CONTEXT_BAN;
50 
51 	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
52 	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
53 		score += I915_CLIENT_SCORE_HANG_FAST;
54 
55 	if (score) {
56 		atomic_add(score, &file_priv->ban_score);
57 
58 		drm_dbg(&ctx->i915->drm,
59 			"client %s: gained %u ban score, now %u\n",
60 			ctx->name, score,
61 			atomic_read(&file_priv->ban_score));
62 	}
63 }
64 
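/*
 * A worked example of the scoring above (values per the logic in
 * client_mark_guilty(), not measured): a context ban contributes
 * I915_CLIENT_SCORE_CONTEXT_BAN to the client's ban_score, and a hang
 * landing within I915_CLIENT_FAST_HANG_JIFFIES of the previous one adds
 * I915_CLIENT_SCORE_HANG_FAST on top, so a banned context hanging in
 * rapid succession accrues both increments in a single call.
 */
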
65 static bool mark_guilty(struct i915_request *rq)
66 {
67 	struct i915_gem_context *ctx;
68 	unsigned long prev_hang;
69 	bool banned;
70 	int i;
71 
72 	if (intel_context_is_closed(rq->context))
73 		return true;
74 
75 	rcu_read_lock();
76 	ctx = rcu_dereference(rq->context->gem_context);
77 	if (ctx && !kref_get_unless_zero(&ctx->ref))
78 		ctx = NULL;
79 	rcu_read_unlock();
80 	if (!ctx)
81 		return intel_context_is_banned(rq->context);
82 
83 	atomic_inc(&ctx->guilty_count);
84 
85 	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
86 	if (!i915_gem_context_is_bannable(ctx)) {
87 		banned = false;
88 		goto out;
89 	}
90 
91 	drm_notice(&ctx->i915->drm,
92 		   "%s context reset due to GPU hang\n",
93 		   ctx->name);
94 
95 	/* Record the timestamp for the last N hangs */
96 	prev_hang = ctx->hang_timestamp[0];
97 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
98 		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
99 	ctx->hang_timestamp[i] = jiffies;
100 
101 	/* If we have hung N+1 times in rapid succession, we ban the context! */
102 	banned = !i915_gem_context_is_recoverable(ctx);
103 	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
104 		banned = true;
105 	if (banned)
106 		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
107 			ctx->name, atomic_read(&ctx->guilty_count));
108 
109 	client_mark_guilty(ctx, banned);
110 
111 out:
112 	i915_gem_context_put(ctx);
113 	return banned;
114 }
115 
116 static void mark_innocent(struct i915_request *rq)
117 {
118 	struct i915_gem_context *ctx;
119 
120 	rcu_read_lock();
121 	ctx = rcu_dereference(rq->context->gem_context);
122 	if (ctx)
123 		atomic_inc(&ctx->active_count);
124 	rcu_read_unlock();
125 }
126 
127 void __i915_request_reset(struct i915_request *rq, bool guilty)
128 {
129 	bool banned = false;
130 
131 	RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
132 	GEM_BUG_ON(__i915_request_is_complete(rq));
133 
134 	rcu_read_lock(); /* protect the GEM context */
135 	if (guilty) {
136 		i915_request_set_error_once(rq, -EIO);
137 		__i915_request_skip(rq);
138 		banned = mark_guilty(rq);
139 	} else {
140 		i915_request_set_error_once(rq, -EAGAIN);
141 		mark_innocent(rq);
142 	}
143 	rcu_read_unlock();
144 
145 	if (banned)
146 		intel_context_ban(rq->context, rq);
147 }
148 
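/*
 * A sketch (not a path in this file) of what the error plumbing above
 * means for a waiter: a guilty request completes with fence.error == -EIO
 * and its payload skipped, while an innocent victim carries -EAGAIN and
 * may be resubmitted once the reset completes.
 */
#if 0
	if (dma_fence_wait(&rq->fence, false) == 0) {
		if (rq->fence.error == -EIO)
			; /* guilty: payload skipped, results undefined */
		else if (rq->fence.error == -EAGAIN)
			; /* innocent victim: safe to replay after reset */
	}
#endif
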
149 static bool i915_in_reset(struct pci_dev *pdev)
150 {
151 	u8 gdrst;
152 
153 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
154 	return gdrst & GRDOM_RESET_STATUS;
155 }
156 
157 static int i915_do_reset(struct intel_gt *gt,
158 			 intel_engine_mask_t engine_mask,
159 			 unsigned int retry)
160 {
161 	struct pci_dev *pdev = gt->i915->drm.pdev;
162 	int err;
163 
164 	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
165 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
166 	udelay(50);
167 	err = wait_for_atomic(i915_in_reset(pdev), 50);
168 
169 	/* Clear the reset request. */
170 	pci_write_config_byte(pdev, I915_GDRST, 0);
171 	udelay(50);
172 	if (!err)
173 		err = wait_for_atomic(!i915_in_reset(pdev), 50);
174 
175 	return err;
176 }
177 
178 static bool g4x_reset_complete(struct pci_dev *pdev)
179 {
180 	u8 gdrst;
181 
182 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
183 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
184 }
185 
186 static int g33_do_reset(struct intel_gt *gt,
187 			intel_engine_mask_t engine_mask,
188 			unsigned int retry)
189 {
190 	struct pci_dev *pdev = gt->i915->drm.pdev;
191 
192 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
193 	return wait_for_atomic(g4x_reset_complete(pdev), 50);
194 }
195 
196 static int g4x_do_reset(struct intel_gt *gt,
197 			intel_engine_mask_t engine_mask,
198 			unsigned int retry)
199 {
200 	struct pci_dev *pdev = gt->i915->drm.pdev;
201 	struct intel_uncore *uncore = gt->uncore;
202 	int ret;
203 
204 	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
205 	intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
206 	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
207 
208 	pci_write_config_byte(pdev, I915_GDRST,
209 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
210 	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
211 	if (ret) {
212 		GT_TRACE(gt, "Wait for media reset failed\n");
213 		goto out;
214 	}
215 
216 	pci_write_config_byte(pdev, I915_GDRST,
217 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
218 	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
219 	if (ret) {
220 		GT_TRACE(gt, "Wait for render reset failed\n");
221 		goto out;
222 	}
223 
224 out:
225 	pci_write_config_byte(pdev, I915_GDRST, 0);
226 
227 	intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
228 	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
229 
230 	return ret;
231 }
232 
233 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
234 			unsigned int retry)
235 {
236 	struct intel_uncore *uncore = gt->uncore;
237 	int ret;
238 
239 	intel_uncore_write_fw(uncore, ILK_GDSR,
240 			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
241 	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
242 					   ILK_GRDOM_RESET_ENABLE, 0,
243 					   5000, 0,
244 					   NULL);
245 	if (ret) {
246 		GT_TRACE(gt, "Wait for render reset failed\n");
247 		goto out;
248 	}
249 
250 	intel_uncore_write_fw(uncore, ILK_GDSR,
251 			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
252 	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
253 					   ILK_GRDOM_RESET_ENABLE, 0,
254 					   5000, 0,
255 					   NULL);
256 	if (ret) {
257 		GT_TRACE(gt, "Wait for media reset failed\n");
258 		goto out;
259 	}
260 
261 out:
262 	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
263 	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
264 	return ret;
265 }
266 
267 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
268 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
269 {
270 	struct intel_uncore *uncore = gt->uncore;
271 	int loops;
272 	int err;
273 
274 	/*
275 	 * On some platforms, e.g. Jasperlake, we see that the engine register
276 	 * state is not cleared until shortly after GDRST reports completion,
277 	 * causing a failure as we try to immediately resume while the internal
278 	 * state is still in flux. If we immediately repeat the reset, the
279 	 * second reset appears to serialise with the first, and since it is a
280 	 * no-op, the registers should retain their reset value. However, there
281 	 * is still a concern that upon leaving the second reset, the internal
282 	 * engine state is still in flux and not ready for resuming.
283 	 *
284 	 * Starting on MTL, there are some prep steps that we need to do when
285 	 * resetting some engines that need to be applied every time we write to
286 	 * GEN6_GDRST. As those are time consuming (tens of ms), we don't want
287 	 * to perform that twice, so, since the Jasperlake issue hasn't been
288 	 * observed on MTL, we avoid repeating the reset on newer platforms.
289 	 */
290 	loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;
291 
292 	/*
293 	 * GEN6_GDRST is not in the gt power well, no need to check
294 	 * for fifo space for the write or forcewake the chip for
295 	 * the read
296 	 */
297 	do {
298 		intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
299 
300 		/* Wait for the device to ack the reset requests. */
301 		err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
302 						   hw_domain_mask, 0,
303 						   2000, 0,
304 						   NULL);
305 	} while (err == 0 && --loops);
306 	if (err)
307 		GT_TRACE(gt,
308 			 "Wait for 0x%08x engines reset failed\n",
309 			 hw_domain_mask);
310 
311 	/*
312 	 * As we have observed that the engine state is still volatile
313 	 * after GDRST is acked, impose a small delay to let everything settle.
314 	 */
315 	udelay(50);
316 
317 	return err;
318 }
319 
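/*
 * Illustrative use of the domain reset (a sketch; __gen6_reset_engines()
 * and __reset_guc() below are the real callers): individual GEN6_GRDOM_*
 * bits may be ORed together, or GEN6_GRDOM_FULL used to reset everything
 * at once.
 */
#if 0
	/* e.g. reset the render and media domains together */
	err = gen6_hw_domain_reset(gt, GEN6_GRDOM_RENDER | GEN6_GRDOM_MEDIA);
#endif
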
320 static int __gen6_reset_engines(struct intel_gt *gt,
321 				intel_engine_mask_t engine_mask,
322 				unsigned int retry)
323 {
324 	struct intel_engine_cs *engine;
325 	u32 hw_mask;
326 
327 	if (engine_mask == ALL_ENGINES) {
328 		hw_mask = GEN6_GRDOM_FULL;
329 	} else {
330 		intel_engine_mask_t tmp;
331 
332 		hw_mask = 0;
333 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
334 			hw_mask |= engine->reset_domain;
335 		}
336 	}
337 
338 	return gen6_hw_domain_reset(gt, hw_mask);
339 }
340 
341 static int gen6_reset_engines(struct intel_gt *gt,
342 			      intel_engine_mask_t engine_mask,
343 			      unsigned int retry)
344 {
345 	unsigned long flags;
346 	int ret;
347 
348 	spin_lock_irqsave(&gt->uncore->lock, flags);
349 	ret = __gen6_reset_engines(gt, engine_mask, retry);
350 	spin_unlock_irqrestore(&gt->uncore->lock, flags);
351 
352 	return ret;
353 }
354 
355 static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
356 {
357 	int vecs_id;
358 
359 	GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
360 
361 	vecs_id = _VECS((engine->instance) / 2);
362 
363 	return engine->gt->engine[vecs_id];
364 }
365 
366 struct sfc_lock_data {
367 	i915_reg_t lock_reg;
368 	i915_reg_t ack_reg;
369 	i915_reg_t usage_reg;
370 	u32 lock_bit;
371 	u32 ack_bit;
372 	u32 usage_bit;
373 	u32 reset_bit;
374 };
375 
376 static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
377 				     struct sfc_lock_data *sfc_lock)
378 {
379 	switch (engine->class) {
380 	default:
381 		MISSING_CASE(engine->class);
382 		fallthrough;
383 	case VIDEO_DECODE_CLASS:
384 		sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
385 		sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
386 
387 		sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
388 		sfc_lock->ack_bit  = GEN11_VCS_SFC_LOCK_ACK_BIT;
389 
390 		sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
391 		sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
392 		sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
393 
394 		break;
395 	case VIDEO_ENHANCEMENT_CLASS:
396 		sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
397 		sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
398 
399 		sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
400 		sfc_lock->ack_bit  = GEN11_VECS_SFC_LOCK_ACK_BIT;
401 
402 		sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
403 		sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
404 		sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
405 
406 		break;
407 	}
408 }
409 
410 static int gen11_lock_sfc(struct intel_engine_cs *engine,
411 			  u32 *reset_mask,
412 			  u32 *unlock_mask)
413 {
414 	struct intel_uncore *uncore = engine->uncore;
415 	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
416 	struct sfc_lock_data sfc_lock;
417 	bool lock_obtained, lock_to_other = false;
418 	int ret;
419 
420 	switch (engine->class) {
421 	case VIDEO_DECODE_CLASS:
422 		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
423 			return 0;
424 
425 		fallthrough;
426 	case VIDEO_ENHANCEMENT_CLASS:
427 		get_sfc_forced_lock_data(engine, &sfc_lock);
428 
429 		break;
430 	default:
431 		return 0;
432 	}
433 
434 	if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
435 		struct intel_engine_cs *paired_vecs;
436 
437 		if (engine->class != VIDEO_DECODE_CLASS ||
438 		    GRAPHICS_VER(engine->i915) != 12)
439 			return 0;
440 
441 		/*
442 		 * Wa_14010733141
443 		 *
444 		 * If the VCS-MFX isn't using the SFC, we also need to check
445 		 * whether VCS-HCP is using it.  If so, we need to issue a *VE*
446 		 * forced lock on the VE engine that shares the same SFC.
447 		 */
448 		if (!(intel_uncore_read_fw(uncore,
449 					   GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
450 		      GEN12_HCP_SFC_USAGE_BIT))
451 			return 0;
452 
453 		paired_vecs = find_sfc_paired_vecs_engine(engine);
454 		get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
455 		lock_to_other = true;
456 		*unlock_mask |= paired_vecs->mask;
457 	} else {
458 		*unlock_mask |= engine->mask;
459 	}
460 
461 	/*
462 	 * If the engine is using an SFC, tell the engine that a software reset
463 	 * is going to happen. The engine will then try to force lock the SFC.
464 	 * If SFC ends up being locked to the engine we want to reset, we have
465 	 * to reset it as well (we will unlock it once the reset sequence is
466 	 * completed).
467 	 */
468 	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
469 
470 	ret = __intel_wait_for_register_fw(uncore,
471 					   sfc_lock.ack_reg,
472 					   sfc_lock.ack_bit,
473 					   sfc_lock.ack_bit,
474 					   1000, 0, NULL);
475 
476 	/*
477 	 * Was the SFC released while we were trying to lock it?
478 	 *
479 	 * We should reset both the engine and the SFC if:
480 	 *  - We were locking the SFC to this engine and the lock succeeded
481 	 *       OR
482 	 *  - We were locking the SFC to a different engine (Wa_14010733141)
483 	 *    but the SFC was released before the lock was obtained.
484 	 *
485 	 * Otherwise we need only reset the engine by itself and we can
486 	 * leave the SFC alone.
487 	 */
488 	lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
489 			sfc_lock.usage_bit) != 0;
490 	if (lock_obtained == lock_to_other)
491 		return 0;
492 
493 	if (ret) {
494 		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
495 		return ret;
496 	}
497 
498 	*reset_mask |= sfc_lock.reset_bit;
499 	return 0;
500 }
501 
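/*
 * Worked example of the decision above: locking our own SFC
 * (lock_to_other == false) and seeing the usage bit set
 * (lock_obtained == true) means the SFC is held by the engine being
 * reset, so its reset bit is added to the mask. Under Wa_14010733141
 * (lock_to_other == true), a usage bit that cleared before the lock was
 * obtained (lock_obtained == false) likewise means the SFC must be
 * reset, as it was released mid-lock. The two matching combinations
 * reset the engine alone.
 */
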
502 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
503 {
504 	struct intel_uncore *uncore = engine->uncore;
505 	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
506 	struct sfc_lock_data sfc_lock = {};
507 
508 	if (engine->class != VIDEO_DECODE_CLASS &&
509 	    engine->class != VIDEO_ENHANCEMENT_CLASS)
510 		return;
511 
512 	if (engine->class == VIDEO_DECODE_CLASS &&
513 	    (BIT(engine->instance) & vdbox_sfc_access) == 0)
514 		return;
515 
516 	get_sfc_forced_lock_data(engine, &sfc_lock);
517 
518 	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
519 }
520 
521 static int __gen11_reset_engines(struct intel_gt *gt,
522 				 intel_engine_mask_t engine_mask,
523 				 unsigned int retry)
524 {
525 	struct intel_engine_cs *engine;
526 	intel_engine_mask_t tmp;
527 	u32 reset_mask, unlock_mask = 0;
528 	int ret;
529 
530 	if (engine_mask == ALL_ENGINES) {
531 		reset_mask = GEN11_GRDOM_FULL;
532 	} else {
533 		reset_mask = 0;
534 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
535 			reset_mask |= engine->reset_domain;
536 			ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
537 			if (ret)
538 				goto sfc_unlock;
539 		}
540 	}
541 
542 	ret = gen6_hw_domain_reset(gt, reset_mask);
543 
544 sfc_unlock:
545 	/*
546 	 * We unlock the SFC based on the lock status and not the result of
547 	 * gen11_lock_sfc to make sure that we clean properly if something
548 	 * wrong happened during the lock (e.g. lock acquired after timeout
549 	 * expiration).
550 	 *
551 	 * Due to Wa_14010733141, we may have locked an SFC to an engine that
552 	 * wasn't being reset.  So instead of calling gen11_unlock_sfc()
553 	 * on engine_mask, we instead call it on the mask of engines that our
554 	 * gen11_lock_sfc() calls told us actually had locks attempted.
555 	 */
556 	for_each_engine_masked(engine, gt, unlock_mask, tmp)
557 		gen11_unlock_sfc(engine);
558 
559 	return ret;
560 }
561 
562 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
563 {
564 	struct intel_uncore *uncore = engine->uncore;
565 	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
566 	u32 request, mask, ack;
567 	int ret;
568 
569 	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
570 		return -ETIMEDOUT;
571 
572 	ack = intel_uncore_read_fw(uncore, reg);
573 	if (ack & RESET_CTL_CAT_ERROR) {
574 		/*
575 		 * For catastrophic errors, ready-for-reset sequence
576 		 * needs to be bypassed: HAS#396813
577 		 */
578 		request = RESET_CTL_CAT_ERROR;
579 		mask = RESET_CTL_CAT_ERROR;
580 
581 		/* Catastrophic errors need to be cleared by HW */
582 		ack = 0;
583 	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
584 		request = RESET_CTL_REQUEST_RESET;
585 		mask = RESET_CTL_READY_TO_RESET;
586 		ack = RESET_CTL_READY_TO_RESET;
587 	} else {
588 		return 0;
589 	}
590 
591 	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
592 	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
593 					   700, 0, NULL);
594 	if (ret)
595 		drm_err(&engine->i915->drm,
596 			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
597 			engine->name, request,
598 			intel_uncore_read_fw(uncore, reg));
599 
600 	return ret;
601 }
602 
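/*
 * The request/ack handshake above pairs with gen8_engine_reset_cancel()
 * below; a simplified sketch of the sequence gen8_reset_engines()
 * performs under the uncore lock:
 */
#if 0
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_prepare(engine); /* request ready-for-reset */
	__gen11_reset_engines(gt, engine_mask, retry); /* or __gen6_ pre-gen11 */
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine); /* withdraw the request */
#endif
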
603 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
604 {
605 	intel_uncore_write_fw(engine->uncore,
606 			      RING_RESET_CTL(engine->mmio_base),
607 			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
608 }
609 
610 static int gen8_reset_engines(struct intel_gt *gt,
611 			      intel_engine_mask_t engine_mask,
612 			      unsigned int retry)
613 {
614 	struct intel_engine_cs *engine;
615 	const bool reset_non_ready = retry >= 1;
616 	intel_engine_mask_t tmp;
617 	unsigned long flags;
618 	int ret;
619 
620 	spin_lock_irqsave(&gt->uncore->lock, flags);
621 
622 	for_each_engine_masked(engine, gt, engine_mask, tmp) {
623 		ret = gen8_engine_reset_prepare(engine);
624 		if (ret && !reset_non_ready)
625 			goto skip_reset;
626 
627 		/*
628 		 * If this is not the first failed attempt to prepare,
629 		 * we decide to proceed anyway.
630 		 *
631 		 * By doing so we risk context corruption and with
632 		 * some gens (kbl), possible system hang if reset
633 		 * happens during active bb execution.
634 		 *
635 		 * We rather take context corruption instead of
636 		 * failed reset with a wedged driver/gpu. And
637 		 * active bb execution case should be covered by
638 		 * stop_engines() we have before the reset.
639 		 */
640 	}
641 
642 	/*
643 	 * Wa_22011100796:dg2, whenever Full soft reset is required,
644 	 * reset all individual engines firstly, and then do a full soft reset.
645 	 *
646 	 * This is best effort, so ignore any error from the initial reset.
647 	 */
648 	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
649 		__gen11_reset_engines(gt, gt->info.engine_mask, 0);
650 
651 	if (GRAPHICS_VER(gt->i915) >= 11)
652 		ret = __gen11_reset_engines(gt, engine_mask, retry);
653 	else
654 		ret = __gen6_reset_engines(gt, engine_mask, retry);
655 
656 skip_reset:
657 	for_each_engine_masked(engine, gt, engine_mask, tmp)
658 		gen8_engine_reset_cancel(engine);
659 
660 	spin_unlock_irqrestore(&gt->uncore->lock, flags);
661 
662 	return ret;
663 }
664 
665 static int mock_reset(struct intel_gt *gt,
666 		      intel_engine_mask_t mask,
667 		      unsigned int retry)
668 {
669 	return 0;
670 }
671 
672 typedef int (*reset_func)(struct intel_gt *,
673 			  intel_engine_mask_t engine_mask,
674 			  unsigned int retry);
675 
676 static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
677 {
678 	struct drm_i915_private *i915 = gt->i915;
679 
680 	if (is_mock_gt(gt))
681 		return mock_reset;
682 	else if (GRAPHICS_VER(i915) >= 8)
683 		return gen8_reset_engines;
684 	else if (GRAPHICS_VER(i915) >= 6)
685 		return gen6_reset_engines;
686 	else if (GRAPHICS_VER(i915) >= 5)
687 		return ilk_do_reset;
688 	else if (IS_G4X(i915))
689 		return g4x_do_reset;
690 	else if (IS_G33(i915) || IS_PINEVIEW(i915))
691 		return g33_do_reset;
692 	else if (GRAPHICS_VER(i915) >= 3)
693 		return i915_do_reset;
694 	else
695 		return NULL;
696 }
697 
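/*
 * A minimal sketch of dispatching through the reset_func returned above;
 * the real call site is __intel_gt_reset() below, which additionally
 * holds forcewake and retries on -ETIMEDOUT.
 */
#if 0
	reset_func reset = intel_get_gpu_reset(gt);
	int err = reset ? reset(gt, ALL_ENGINES, 0) : -ENODEV;
#endif
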
698 static int __reset_guc(struct intel_gt *gt)
699 {
700 	u32 guc_domain =
701 		GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
702 
703 	return gen6_hw_domain_reset(gt, guc_domain);
704 }
705 
706 static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
707 {
708 	if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0))
709 		return false;
710 
711 	if (!__HAS_ENGINE(engine_mask, GSC0))
712 		return false;
713 
714 	return intel_gsc_uc_fw_init_done(&gt->uc.gsc);
715 }
716 
717 static intel_engine_mask_t
718 wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first)
719 {
720 	if (!needs_wa_14015076503(gt, engine_mask))
721 		return engine_mask;
722 
723 	/*
724 	 * wa_14015076503: if the GSC FW is loaded, we need to alert it that
725 	 * we're going to do a GSC engine reset and then wait for 200ms for the
726 	 * FW to get ready for it. However, if this is the first ALL_ENGINES
727 	 * reset attempt and the GSC is not busy, we can try to instead reset
728 	 * the GuC and all the other engines individually to avoid the 200ms
729 	 * wait.
730 	 * Skipping the GSC engine is safe because, differently from other
731 	 * engines, the GSCCS's only role is to forward the commands to the GSC
732 	 * FW, so it doesn't have any HW outside of the CS itself and therefore
733 	 * it has no state that we don't explicitly re-init on resume or on
734 	 * context switch (e.g. LRC or power context). The HW for the GSC uC is
735 	 * managed by the GSC FW so we don't need to care about that.
736 	 */
737 	if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) {
738 		__reset_guc(gt);
739 		engine_mask = gt->info.engine_mask & ~BIT(GSC0);
740 	} else {
741 		intel_uncore_rmw(gt->uncore,
742 				 HECI_H_GS1(MTL_GSC_HECI2_BASE),
743 				 0, HECI_H_GS1_ER_PREP);
744 
745 		/* make sure the reset bit is clear when writing the CSR reg */
746 		intel_uncore_rmw(gt->uncore,
747 				 HECI_H_CSR(MTL_GSC_HECI2_BASE),
748 				 HECI_H_CSR_RST, HECI_H_CSR_IG);
749 		drm_msleep(200);
750 	}
751 
752 	return engine_mask;
753 }
754 
755 static void
756 wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
757 {
758 	if (!needs_wa_14015076503(gt, engine_mask))
759 		return;
760 
761 	intel_uncore_rmw(gt->uncore,
762 			 HECI_H_GS1(MTL_GSC_HECI2_BASE),
763 			 HECI_H_GS1_ER_PREP, 0);
764 }
765 
766 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
767 {
768 	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
769 	reset_func reset;
770 	int ret = -ETIMEDOUT;
771 	int retry;
772 
773 	reset = intel_get_gpu_reset(gt);
774 	if (!reset)
775 		return -ENODEV;
776 
777 	/*
778 	 * If the power well sleeps during the reset, the reset
779 	 * request may be dropped and never completes (causing -EIO).
780 	 */
781 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
782 	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
783 		intel_engine_mask_t reset_mask;
784 
785 		reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
786 
787 		GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
788 		preempt_disable();
789 		ret = reset(gt, reset_mask, retry);
790 		preempt_enable();
791 
792 		wa_14015076503_end(gt, reset_mask);
793 	}
794 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
795 
796 	return ret;
797 }
798 
799 bool intel_has_gpu_reset(const struct intel_gt *gt)
800 {
801 	if (!gt->i915->params.reset)
802 		return false;
803 
804 	return intel_get_gpu_reset(gt);
805 }
806 
807 bool intel_has_reset_engine(const struct intel_gt *gt)
808 {
809 	if (gt->i915->params.reset < 2)
810 		return false;
811 
812 	return INTEL_INFO(gt->i915)->has_reset_engine;
813 }
814 
815 int intel_reset_guc(struct intel_gt *gt)
816 {
817 	int ret;
818 
819 	GEM_BUG_ON(!HAS_GT_UC(gt->i915));
820 
821 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
822 	ret = __reset_guc(gt);
823 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
824 
825 	return ret;
826 }
827 
828 /*
829  * Ensure the irq handler finishes, and is not run again.
830  * Also return the active request so that we only search for it once.
831  */
832 static void reset_prepare_engine(struct intel_engine_cs *engine)
833 {
834 	/*
835 	 * During the reset sequence, we must prevent the engine from
836 	 * entering RC6. As the context state is undefined until we restart
837 	 * the engine, if it does enter RC6 during the reset, the state
838 	 * written to the powercontext is undefined and so we may lose
839 	 * GPU state upon resume, i.e. fail to restart after a reset.
840 	 */
841 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
842 	if (engine->reset.prepare)
843 		engine->reset.prepare(engine);
844 }
845 
846 static void revoke_mmaps(struct intel_gt *gt)
847 {
848 	int i;
849 
850 	for (i = 0; i < gt->ggtt->num_fences; i++) {
851 		struct drm_vma_offset_node *node;
852 		struct i915_vma *vma;
853 		u64 vma_offset;
854 
855 		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
856 		if (!vma)
857 			continue;
858 
859 		if (!i915_vma_has_userfault(vma))
860 			continue;
861 
862 		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
863 
864 		if (!vma->mmo)
865 			continue;
866 
867 		node = &vma->mmo->vma_node;
868 		vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
869 
870 #ifdef __linux__
871 		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
872 				    drm_vma_node_offset_addr(node) + vma_offset,
873 				    vma->size,
874 				    1);
875 #else
876 {
877 		struct drm_i915_private *dev_priv = vma->obj->base.dev->dev_private;
878 		struct vm_page *pg;
879 
880 		for (pg = &dev_priv->pgs[atop(vma->node.start)];
881 		     pg != &dev_priv->pgs[atop(vma->node.start + vma->size)];
882 		     pg++)
883 			pmap_page_protect(pg, PROT_NONE);
884 }
885 #endif
886 	}
887 }
888 
889 static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
890 {
891 	struct intel_engine_cs *engine;
892 	intel_engine_mask_t awake = 0;
893 	enum intel_engine_id id;
894 
895 	/* For GuC mode, ensure submission is disabled before stopping ring */
896 	intel_uc_reset_prepare(&gt->uc);
897 
898 	for_each_engine(engine, gt, id) {
899 		if (intel_engine_pm_get_if_awake(engine))
900 			awake |= engine->mask;
901 		reset_prepare_engine(engine);
902 	}
903 
904 	return awake;
905 }
906 
907 static void gt_revoke(struct intel_gt *gt)
908 {
909 	revoke_mmaps(gt);
910 }
911 
912 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
913 {
914 	struct intel_engine_cs *engine;
915 	enum intel_engine_id id;
916 	int err;
917 
918 	/*
919 	 * Everything depends on having the GTT running, so we need to start
920 	 * there.
921 	 */
922 	err = i915_ggtt_enable_hw(gt->i915);
923 	if (err)
924 		return err;
925 
926 	local_bh_disable();
927 	for_each_engine(engine, gt, id)
928 		__intel_engine_reset(engine, stalled_mask & engine->mask);
929 	local_bh_enable();
930 
931 	intel_uc_reset(&gt->uc, ALL_ENGINES);
932 
933 	intel_ggtt_restore_fences(gt->ggtt);
934 
935 	return err;
936 }
937 
938 static void reset_finish_engine(struct intel_engine_cs *engine)
939 {
940 	if (engine->reset.finish)
941 		engine->reset.finish(engine);
942 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
943 
944 	intel_engine_signal_breadcrumbs(engine);
945 }
946 
947 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
948 {
949 	struct intel_engine_cs *engine;
950 	enum intel_engine_id id;
951 
952 	for_each_engine(engine, gt, id) {
953 		reset_finish_engine(engine);
954 		if (awake & engine->mask)
955 			intel_engine_pm_put(engine);
956 	}
957 
958 	intel_uc_reset_finish(&gt->uc);
959 }
960 
961 static void nop_submit_request(struct i915_request *request)
962 {
963 	RQ_TRACE(request, "-EIO\n");
964 
965 	request = i915_request_mark_eio(request);
966 	if (request) {
967 		i915_request_submit(request);
968 		intel_engine_signal_breadcrumbs(request->engine);
969 
970 		i915_request_put(request);
971 	}
972 }
973 
974 static void __intel_gt_set_wedged(struct intel_gt *gt)
975 {
976 	struct intel_engine_cs *engine;
977 	intel_engine_mask_t awake;
978 	enum intel_engine_id id;
979 
980 	if (test_bit(I915_WEDGED, &gt->reset.flags))
981 		return;
982 
983 	GT_TRACE(gt, "start\n");
984 
985 	/*
986 	 * First, stop submission to hw, but do not yet complete requests by
987 	 * rolling the global seqno forward (since this would complete requests
988 	 * for which we haven't set the fence error to EIO yet).
989 	 */
990 	awake = reset_prepare(gt);
991 
992 	/* Even if the GPU reset fails, it should still stop the engines */
993 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
994 		__intel_gt_reset(gt, ALL_ENGINES);
995 
996 	for_each_engine(engine, gt, id)
997 		engine->submit_request = nop_submit_request;
998 
999 	/*
1000 	 * Make sure no request can slip through without getting completed by
1001 	 * either this call here to intel_engine_write_global_seqno, or the one
1002 	 * in nop_submit_request.
1003 	 */
1004 	synchronize_rcu_expedited();
1005 	set_bit(I915_WEDGED, &gt->reset.flags);
1006 
1007 	/* Mark all executing requests as skipped */
1008 	local_bh_disable();
1009 	for_each_engine(engine, gt, id)
1010 		if (engine->reset.cancel)
1011 			engine->reset.cancel(engine);
1012 	intel_uc_cancel_requests(&gt->uc);
1013 	local_bh_enable();
1014 
1015 	reset_finish(gt, awake);
1016 
1017 	GT_TRACE(gt, "end\n");
1018 }
1019 
1020 void intel_gt_set_wedged(struct intel_gt *gt)
1021 {
1022 	intel_wakeref_t wakeref;
1023 
1024 	if (test_bit(I915_WEDGED, &gt->reset.flags))
1025 		return;
1026 
1027 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1028 	mutex_lock(&gt->reset.mutex);
1029 
1030 	if (GEM_SHOW_DEBUG()) {
1031 		struct drm_printer p = drm_debug_printer(__func__);
1032 		struct intel_engine_cs *engine;
1033 		enum intel_engine_id id;
1034 
1035 		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
1036 		for_each_engine(engine, gt, id) {
1037 			if (intel_engine_is_idle(engine))
1038 				continue;
1039 
1040 			intel_engine_dump(engine, &p, "%s\n", engine->name);
1041 		}
1042 	}
1043 
1044 	__intel_gt_set_wedged(gt);
1045 
1046 	mutex_unlock(&gt->reset.mutex);
1047 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1048 }
1049 
1050 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
1051 {
1052 	struct intel_gt_timelines *timelines = &gt->timelines;
1053 	struct intel_timeline *tl;
1054 	bool ok;
1055 
1056 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
1057 		return true;
1058 
1059 	/* Never fully initialised, recovery impossible */
1060 	if (intel_gt_has_unrecoverable_error(gt))
1061 		return false;
1062 
1063 	GT_TRACE(gt, "start\n");
1064 
1065 	/*
1066 	 * Before unwedging, make sure that all pending operations
1067 	 * are flushed and errored out - we may have requests waiting upon
1068 	 * third party fences. We marked all inflight requests as EIO, and
1069 	 * every execbuf since returned EIO, for consistency we want all
1070 	 * the currently pending requests to also be marked as EIO, which
1071 	 * is done inside our nop_submit_request - and so we must wait.
1072 	 *
1073 	 * No more can be submitted until we reset the wedged bit.
1074 	 */
1075 	spin_lock(&timelines->lock);
1076 	list_for_each_entry(tl, &timelines->active_list, link) {
1077 		struct dma_fence *fence;
1078 
1079 		fence = i915_active_fence_get(&tl->last_request);
1080 		if (!fence)
1081 			continue;
1082 
1083 		spin_unlock(&timelines->lock);
1084 
1085 		/*
1086 		 * All internal dependencies (i915_requests) will have
1087 		 * been flushed by the set-wedge, but we may be stuck waiting
1088 		 * for external fences. These should all be capped to 10s
1089 		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
1090 		 * in the worst case.
1091 		 */
1092 		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
1093 		dma_fence_put(fence);
1094 
1095 		/* Restart iteration after dropping the lock */
1096 		spin_lock(&timelines->lock);
1097 		tl = list_entry(&timelines->active_list, typeof(*tl), link);
1098 	}
1099 	spin_unlock(&timelines->lock);
1100 
1101 	/* We must reset pending GPU events before restoring our submission */
1102 	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
1103 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1104 		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
1105 	if (!ok) {
1106 		/*
1107 		 * Warn CI about the unrecoverable wedged condition.
1108 		 * Time for a reboot.
1109 		 */
1110 		add_taint_for_CI(gt->i915, TAINT_WARN);
1111 		return false;
1112 	}
1113 
1114 	/*
1115 	 * Undo nop_submit_request. We prevent all new i915 requests from
1116 	 * being queued (by disallowing execbuf whilst wedged) so having
1117 	 * waited for all active requests above, we know the system is idle
1118 	 * and do not have to worry about a thread being inside
1119 	 * engine->submit_request() as we swap over. So unlike installing
1120 	 * the nop_submit_request on reset, we can do this from normal
1121 	 * context and do not require stop_machine().
1122 	 */
1123 	intel_engines_reset_default_submission(gt);
1124 
1125 	GT_TRACE(gt, "end\n");
1126 
1127 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
1128 	clear_bit(I915_WEDGED, &gt->reset.flags);
1129 
1130 	return true;
1131 }
1132 
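/*
 * Note the iteration pattern in __intel_gt_unset_wedged() above: the
 * timelines lock is dropped around the fence wait, so instead of trusting
 * a stale cursor, tl is pointed back at the list head before the lock is
 * retaken and the walk restarts from the beginning; timelines whose last
 * request has already completed fall through the !fence check on the
 * next pass, so the walk still makes forward progress.
 */
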
1133 bool intel_gt_unset_wedged(struct intel_gt *gt)
1134 {
1135 	bool result;
1136 
1137 	mutex_lock(&gt->reset.mutex);
1138 	result = __intel_gt_unset_wedged(gt);
1139 	mutex_unlock(&gt->reset.mutex);
1140 
1141 	return result;
1142 }
1143 
1144 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
1145 {
1146 	int err, i;
1147 
1148 	err = __intel_gt_reset(gt, ALL_ENGINES);
1149 	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
1150 		drm_msleep(10 * (i + 1));
1151 		err = __intel_gt_reset(gt, ALL_ENGINES);
1152 	}
1153 	if (err)
1154 		return err;
1155 
1156 	return gt_reset(gt, stalled_mask);
1157 }
1158 
1159 static int resume(struct intel_gt *gt)
1160 {
1161 	struct intel_engine_cs *engine;
1162 	enum intel_engine_id id;
1163 	int ret;
1164 
1165 	for_each_engine(engine, gt, id) {
1166 		ret = intel_engine_resume(engine);
1167 		if (ret)
1168 			return ret;
1169 	}
1170 
1171 	return 0;
1172 }
1173 
1174 /**
1175  * intel_gt_reset - reset chip after a hang
1176  * @gt: #intel_gt to reset
1177  * @stalled_mask: mask of the stalled engines with the guilty requests
1178  * @reason: user error message for why we are resetting
1179  *
1180  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
1181  * on failure.
1182  *
1183  * Procedure is fairly simple:
1184  *   - reset the chip using the reset reg
1185  *   - re-init context state
1186  *   - re-init hardware status page
1187  *   - re-init ring buffer
1188  *   - re-init interrupt state
1189  *   - re-init display
1190  */
1191 void intel_gt_reset(struct intel_gt *gt,
1192 		    intel_engine_mask_t stalled_mask,
1193 		    const char *reason)
1194 {
1195 	intel_engine_mask_t awake;
1196 	int ret;
1197 
1198 	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
1199 
1200 	might_sleep();
1201 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1202 
1203 	/*
1204 	 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
1205 	 * critical section like gpu reset.
1206 	 */
1207 	gt_revoke(gt);
1208 
1209 	mutex_lock(&gt->reset.mutex);
1210 
1211 	/* Clear any previous failed attempts at recovery. Time to try again. */
1212 	if (!__intel_gt_unset_wedged(gt))
1213 		goto unlock;
1214 
1215 	if (reason)
1216 		drm_notice(&gt->i915->drm,
1217 			   "Resetting chip for %s\n", reason);
1218 	atomic_inc(&gt->i915->gpu_error.reset_count);
1219 
1220 	awake = reset_prepare(gt);
1221 
1222 	if (!intel_has_gpu_reset(gt)) {
1223 		if (gt->i915->params.reset)
1224 			drm_err(&gt->i915->drm, "GPU reset not supported\n");
1225 		else
1226 			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
1227 		goto error;
1228 	}
1229 
1230 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1231 		intel_runtime_pm_disable_interrupts(gt->i915);
1232 
1233 	if (do_reset(gt, stalled_mask)) {
1234 		drm_err(&gt->i915->drm, "Failed to reset chip\n");
1235 		goto taint;
1236 	}
1237 
1238 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1239 		intel_runtime_pm_enable_interrupts(gt->i915);
1240 
1241 	intel_overlay_reset(gt->i915);
1242 
1243 	/*
1244 	 * Next we need to restore the context, but we don't use those
1245 	 * yet either...
1246 	 *
1247 	 * Ring buffer needs to be re-initialized in the KMS case, or if X
1248 	 * was running at the time of the reset (i.e. we weren't VT
1249 	 * switched away).
1250 	 */
1251 	ret = intel_gt_init_hw(gt);
1252 	if (ret) {
1253 		drm_err(&gt->i915->drm,
1254 			"Failed to initialise HW following reset (%d)\n",
1255 			ret);
1256 		goto taint;
1257 	}
1258 
1259 	ret = resume(gt);
1260 	if (ret)
1261 		goto taint;
1262 
1263 finish:
1264 	reset_finish(gt, awake);
1265 unlock:
1266 	mutex_unlock(&gt->reset.mutex);
1267 	return;
1268 
1269 taint:
1270 	/*
1271 	 * History tells us that if we cannot reset the GPU now, we
1272 	 * never will. This then impacts everything that is run
1273 	 * subsequently. On failing the reset, we mark the driver
1274 	 * as wedged, preventing further execution on the GPU.
1275 	 * We also want to go one step further and add a taint to the
1276 	 * kernel so that any subsequent faults can be traced back to
1277 	 * this failure. This is important for CI, where if the
1278 	 * GPU/driver fails we would like to reboot and restart testing
1279 	 * rather than continue on into oblivion. For everyone else,
1280 	 * the system should still plod along, but they have been warned!
1281 	 */
1282 	add_taint_for_CI(gt->i915, TAINT_WARN);
1283 error:
1284 	__intel_gt_set_wedged(gt);
1285 	goto finish;
1286 }
1287 
1288 static int intel_gt_reset_engine(struct intel_engine_cs *engine)
1289 {
1290 	return __intel_gt_reset(engine->gt, engine->mask);
1291 }
1292 
1293 int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
1294 {
1295 	struct intel_gt *gt = engine->gt;
1296 	int ret;
1297 
1298 	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
1299 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1300 
1301 	if (intel_engine_uses_guc(engine))
1302 		return -ENODEV;
1303 
1304 	if (!intel_engine_pm_get_if_awake(engine))
1305 		return 0;
1306 
1307 	reset_prepare_engine(engine);
1308 
1309 	if (msg)
1310 		drm_notice(&engine->i915->drm,
1311 			   "Resetting %s for %s\n", engine->name, msg);
1312 	i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
1313 
1314 	ret = intel_gt_reset_engine(engine);
1315 	if (ret) {
1316 		/* If we fail here, we expect to fallback to a global reset */
1317 		ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
1318 		goto out;
1319 	}
1320 
1321 	/*
1322 	 * The request that caused the hang is stuck on elsp, we know the
1323 	 * active request and can drop it, adjust head to skip the offending
1324 	 * request to resume executing remaining requests in the queue.
1325 	 */
1326 	__intel_engine_reset(engine, true);
1327 
1328 	/*
1329 	 * The engine and its registers (and workarounds in case of render)
1330 	 * have been reset to their default values. Follow the init_ring
1331 	 * process to program RING_MODE, HWSP and re-enable submission.
1332 	 */
1333 	ret = intel_engine_resume(engine);
1334 
1335 out:
1336 	intel_engine_cancel_stop_cs(engine);
1337 	reset_finish_engine(engine);
1338 	intel_engine_pm_put_async(engine);
1339 	return ret;
1340 }
1341 
1342 /**
1343  * intel_engine_reset - reset GPU engine to recover from a hang
1344  * @engine: engine to reset
1345  * @msg: reason for GPU reset; or NULL for no drm_notice()
1346  *
1347  * Reset a specific GPU engine. Useful if a hang is detected.
1348  * Returns zero on successful reset or otherwise an error code.
1349  *
1350  * Procedure is:
1351  *  - identifies the request that caused the hang and it is dropped
1352  *  - reset engine (which will force the engine to idle)
1353  *  - re-init/configure engine
1354  */
1355 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1356 {
1357 	int err;
1358 
1359 	local_bh_disable();
1360 	err = __intel_engine_reset_bh(engine, msg);
1361 	local_bh_enable();
1362 
1363 	return err;
1364 }
1365 
1366 static void intel_gt_reset_global(struct intel_gt *gt,
1367 				  u32 engine_mask,
1368 				  const char *reason)
1369 {
1370 #ifdef notyet
1371 	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1372 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1373 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1374 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1375 #endif
1376 	struct intel_wedge_me w;
1377 
1378 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1379 
1380 	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
1381 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1382 
1383 	/* Use a watchdog to ensure that our reset completes */
1384 	intel_wedge_on_timeout(&w, gt, 60 * HZ) {
1385 		intel_display_reset_prepare(gt->i915);
1386 
1387 		intel_gt_reset(gt, engine_mask, reason);
1388 
1389 		intel_display_reset_finish(gt->i915);
1390 	}
1391 
1392 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
1393 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1394 }
1395 
1396 /**
1397  * intel_gt_handle_error - handle a gpu error
1398  * @gt: the intel_gt
1399  * @engine_mask: mask representing engines that are hung
1400  * @flags: control flags
1401  * @fmt: Error message format string
1402  *
1403  * Do some basic checking of register state at error time and
1404  * dump it to the syslog.  Also call i915_capture_error_state() to make
1405  * sure we get a record and make it available in debugfs.  Fire a uevent
1406  * so userspace knows something bad happened (should trigger collection
1407  * of a ring dump etc.).
1408  */
1409 void intel_gt_handle_error(struct intel_gt *gt,
1410 			   intel_engine_mask_t engine_mask,
1411 			   unsigned long flags,
1412 			   const char *fmt, ...)
1413 {
1414 	struct intel_engine_cs *engine;
1415 	intel_wakeref_t wakeref;
1416 	intel_engine_mask_t tmp;
1417 	char error_msg[80];
1418 	char *msg = NULL;
1419 
1420 	if (fmt) {
1421 		va_list args;
1422 
1423 		va_start(args, fmt);
1424 		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1425 		va_end(args);
1426 
1427 		msg = error_msg;
1428 	}
1429 
1430 	/*
1431 	 * In most cases it's guaranteed that we get here with an RPM
1432 	 * reference held, for example because there is a pending GPU
1433 	 * request that won't finish until the reset is done. This
1434 	 * isn't the case at least when we get here by doing a
1435 	 * simulated reset via debugfs, so get an RPM reference.
1436 	 */
1437 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1438 
1439 	engine_mask &= gt->info.engine_mask;
1440 
1441 	if (flags & I915_ERROR_CAPTURE) {
1442 		i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
1443 		intel_gt_clear_error_registers(gt, engine_mask);
1444 	}
1445 
1446 	/*
1447 	 * Try engine reset when available. We fall back to full reset if
1448 	 * single reset fails.
1449 	 */
1450 	if (!intel_uc_uses_guc_submission(&gt->uc) &&
1451 	    intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1452 		local_bh_disable();
1453 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
1454 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1455 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1456 					     &gt->reset.flags))
1457 				continue;
1458 
1459 			if (__intel_engine_reset_bh(engine, msg) == 0)
1460 				engine_mask &= ~engine->mask;
1461 
1462 			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
1463 					      &gt->reset.flags);
1464 		}
1465 		local_bh_enable();
1466 	}
1467 
1468 	if (!engine_mask)
1469 		goto out;
1470 
1471 	/* Full reset needs the mutex, stop any other user trying to do so. */
1472 	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1473 		wait_event(gt->reset.queue,
1474 			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1475 		goto out; /* piggy-back on the other reset */
1476 	}
1477 
1478 	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1479 	synchronize_rcu_expedited();
1480 
1481 	/*
1482 	 * Prevent any other reset-engine attempt. We don't do this for GuC
1483 	 * submission, as the GuC owns the per-engine reset, not the i915.
1484 	 */
1485 	if (!intel_uc_uses_guc_submission(&gt->uc)) {
1486 		for_each_engine(engine, gt, tmp) {
1487 			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1488 						&gt->reset.flags))
1489 				wait_on_bit(&gt->reset.flags,
1490 					    I915_RESET_ENGINE + engine->id,
1491 					    TASK_UNINTERRUPTIBLE);
1492 		}
1493 	}
1494 
1495 	/* Flush everyone using a resource about to be clobbered */
1496 	synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1497 
1498 	intel_gt_reset_global(gt, engine_mask, msg);
1499 
1500 	if (!intel_uc_uses_guc_submission(&gt->uc)) {
1501 		for_each_engine(engine, gt, tmp)
1502 			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
1503 					 &gt->reset.flags);
1504 	}
1505 	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1506 	smp_mb__after_atomic();
1507 	wake_up_all(&gt->reset.queue);
1508 
1509 out:
1510 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1511 }
1512 
1513 static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry)
1514 {
1515 	might_lock(&gt->reset.backoff_srcu);
1516 	if (retry)
1517 		might_sleep();
1518 
1519 	rcu_read_lock();
1520 	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1521 		rcu_read_unlock();
1522 
1523 		if (!retry)
1524 			return -EBUSY;
1525 
1526 		if (wait_event_interruptible(gt->reset.queue,
1527 					     !test_bit(I915_RESET_BACKOFF,
1528 						       &gt->reset.flags)))
1529 			return -EINTR;
1530 
1531 		rcu_read_lock();
1532 	}
1533 	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1534 	rcu_read_unlock();
1535 
1536 	return 0;
1537 }
1538 
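/*
 * Callers bracket reset-sensitive work inside the SRCU read section; a
 * minimal sketch of the expected pairing with intel_gt_reset_unlock():
 */
#if 0
	int tag;

	if (intel_gt_reset_trylock(gt, &tag) == 0) {
		/* ... touch state that a concurrent reset would clobber ... */
		intel_gt_reset_unlock(gt, tag);
	}
#endif
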
1539 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1540 {
1541 	return _intel_gt_reset_lock(gt, srcu, false);
1542 }
1543 
1544 int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu)
1545 {
1546 	return _intel_gt_reset_lock(gt, srcu, true);
1547 }
1548 
1549 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
1550 __releases(&gt->reset.backoff_srcu)
1551 {
1552 	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1553 }
1554 
1555 int intel_gt_terminally_wedged(struct intel_gt *gt)
1556 {
1557 	might_sleep();
1558 
1559 	if (!intel_gt_is_wedged(gt))
1560 		return 0;
1561 
1562 	if (intel_gt_has_unrecoverable_error(gt))
1563 		return -EIO;
1564 
1565 	/* Reset still in progress? Maybe we will recover? */
1566 	if (wait_event_interruptible(gt->reset.queue,
1567 				     !test_bit(I915_RESET_BACKOFF,
1568 					       &gt->reset.flags)))
1569 		return -EINTR;
1570 
1571 	return intel_gt_is_wedged(gt) ? -EIO : 0;
1572 }
1573 
1574 void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1575 {
1576 	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1577 		     I915_WEDGED_ON_INIT);
1578 	intel_gt_set_wedged(gt);
1579 	i915_disable_error_state(gt->i915, -ENODEV);
1580 	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1581 
1582 	/* Wedged on init is non-recoverable */
1583 	add_taint_for_CI(gt->i915, TAINT_WARN);
1584 }
1585 
1586 void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
1587 {
1588 	intel_gt_set_wedged(gt);
1589 	i915_disable_error_state(gt->i915, -ENODEV);
1590 	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
1591 	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
1592 }
1593 
1594 void intel_gt_init_reset(struct intel_gt *gt)
1595 {
1596 	init_waitqueue_head(&gt->reset.queue);
1597 	rw_init(&gt->reset.mutex, "gtres");
1598 	init_srcu_struct(&gt->reset.backoff_srcu);
1599 
1600 	/*
1601 	 * While undesirable to wait inside the shrinker, complain anyway.
1602 	 *
1603 	 * If we have to wait during shrinking, we guarantee forward progress
1604 	 * by forcing the reset. Therefore during the reset we must not
1605 	 * re-enter the shrinker. By declaring that we take the reset mutex
1606 	 * within the shrinker, we forbid ourselves from performing any
1607 	 * fs-reclaim or taking related locks during reset.
1608 	 */
1609 	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
1610 
1611 	/* no GPU until we are ready! */
1612 	__set_bit(I915_WEDGED, &gt->reset.flags);
1613 }
1614 
1615 void intel_gt_fini_reset(struct intel_gt *gt)
1616 {
1617 	cleanup_srcu_struct(&gt->reset.backoff_srcu);
1618 }
1619 
1620 static void intel_wedge_me(struct work_struct *work)
1621 {
1622 	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1623 
1624 	drm_err(&w->gt->i915->drm,
1625 		"%s timed out, cancelling all in-flight rendering.\n",
1626 		w->name);
1627 	intel_gt_set_wedged(w->gt);
1628 }
1629 
1630 void __intel_init_wedge(struct intel_wedge_me *w,
1631 			struct intel_gt *gt,
1632 			long timeout,
1633 			const char *name)
1634 {
1635 	w->gt = gt;
1636 	w->name = name;
1637 
1638 	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1639 	queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
1640 }
1641 
1642 void __intel_fini_wedge(struct intel_wedge_me *w)
1643 {
1644 	cancel_delayed_work_sync(&w->work);
1645 	destroy_delayed_work_on_stack(&w->work);
1646 	w->gt = NULL;
1647 }
1648 
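/*
 * __intel_init_wedge()/__intel_fini_wedge() are normally reached through
 * the intel_wedge_on_timeout() helper (see intel_gt_reset_global()
 * above); a sketch of the open-coded equivalent:
 */
#if 0
	struct intel_wedge_me w;

	__intel_init_wedge(&w, gt, 60 * HZ, __func__);
	/* ... work that must complete before the timeout ... */
	__intel_fini_wedge(&w); /* cancels the wedge worker if we got here */
#endif
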
1649 /*
1650  * Wa_22011802037 requires that we (or the GuC) ensure that no command
1651  * streamers are executing MI_FORCE_WAKE while an engine reset is initiated.
1652  */
1653 bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt)
1654 {
1655 	if (GRAPHICS_VER(gt->i915) < 11)
1656 		return false;
1657 
1658 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0))
1659 		return true;
1660 
1661 	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
1662 		return false;
1663 
1664 	return true;
1665 }
1666 
1667 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1668 #include "selftest_reset.c"
1669 #include "selftest_hangcheck.c"
1670 #endif
1671