xref: /openbsd/sys/dev/pci/drm/i915/gt/intel_reset.c (revision 73471bf0)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008-2018 Intel Corporation
5  */
6 
7 #include <linux/sched/mm.h>
8 #include <linux/stop_machine.h>
9 
10 #include "display/intel_display_types.h"
11 #include "display/intel_overlay.h"
12 
13 #include "gem/i915_gem_context.h"
14 
15 #include "i915_drv.h"
16 #include "i915_gpu_error.h"
17 #include "i915_irq.h"
18 #include "intel_breadcrumbs.h"
19 #include "intel_engine_pm.h"
20 #include "intel_gt.h"
21 #include "intel_gt_pm.h"
22 #include "intel_reset.h"
23 
24 #include "uc/intel_guc.h"
25 #include "uc/intel_guc_submission.h"
26 
27 #define RESET_MAX_RETRIES 3
28 
29 /* XXX How to handle concurrent GGTT updates using tiling registers? */
30 #define RESET_UNDER_STOP_MACHINE 0
31 
32 static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
33 {
34 	intel_uncore_rmw_fw(uncore, reg, 0, set);
35 }
36 
37 static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
38 {
39 	intel_uncore_rmw_fw(uncore, reg, clr, 0);
40 }
41 
42 static void engine_skip_context(struct i915_request *rq)
43 {
44 	struct intel_engine_cs *engine = rq->engine;
45 	struct intel_context *hung_ctx = rq->context;
46 
47 	if (!i915_request_is_active(rq))
48 		return;
49 
50 	lockdep_assert_held(&engine->active.lock);
51 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
52 		if (rq->context == hung_ctx) {
53 			i915_request_set_error_once(rq, -EIO);
54 			__i915_request_skip(rq);
55 		}
56 }
57 
58 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
59 {
60 	struct drm_i915_file_private *file_priv = ctx->file_priv;
61 	unsigned long prev_hang;
62 	unsigned int score;
63 
64 	if (IS_ERR_OR_NULL(file_priv))
65 		return;
66 
67 	score = 0;
68 	if (banned)
69 		score = I915_CLIENT_SCORE_CONTEXT_BAN;
70 
71 	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
72 	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
73 		score += I915_CLIENT_SCORE_HANG_FAST;
74 
75 	if (score) {
76 		atomic_add(score, &file_priv->ban_score);
77 
78 		drm_dbg(&ctx->i915->drm,
79 			"client %s: gained %u ban score, now %u\n",
80 			ctx->name, score,
81 			atomic_read(&file_priv->ban_score));
82 	}
83 }
84 
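/*
 * Ban heuristic, summarising the code below: a hang on an already closed
 * context bans it outright. Otherwise, a bannable context is banned if it
 * is marked non-recoverable, or if this hang lands within
 * CONTEXT_FAST_HANG_JIFFIES of the oldest recorded hang (i.e. repeated
 * hangs in quick succession). Non-bannable contexts (used for reset
 * testing) are never banned here.
 */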
85 static bool mark_guilty(struct i915_request *rq)
86 {
87 	struct i915_gem_context *ctx;
88 	unsigned long prev_hang;
89 	bool banned;
90 	int i;
91 
92 	if (intel_context_is_closed(rq->context)) {
93 		intel_context_set_banned(rq->context);
94 		return true;
95 	}
96 
97 	rcu_read_lock();
98 	ctx = rcu_dereference(rq->context->gem_context);
99 	if (ctx && !kref_get_unless_zero(&ctx->ref))
100 		ctx = NULL;
101 	rcu_read_unlock();
102 	if (!ctx)
103 		return intel_context_is_banned(rq->context);
104 
105 	atomic_inc(&ctx->guilty_count);
106 
107 	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
108 	if (!i915_gem_context_is_bannable(ctx)) {
109 		banned = false;
110 		goto out;
111 	}
112 
113 	drm_notice(&ctx->i915->drm,
114 		   "%s context reset due to GPU hang\n",
115 		   ctx->name);
116 
117 	/* Record the timestamp for the last N hangs */
118 	prev_hang = ctx->hang_timestamp[0];
119 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
120 		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
121 	ctx->hang_timestamp[i] = jiffies;
122 
123 	/* If we have hung N+1 times in rapid succession, we ban the context! */
124 	banned = !i915_gem_context_is_recoverable(ctx);
125 	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
126 		banned = true;
127 	if (banned) {
128 		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
129 			ctx->name, atomic_read(&ctx->guilty_count));
130 		intel_context_set_banned(rq->context);
131 	}
132 
133 	client_mark_guilty(ctx, banned);
134 
135 out:
136 	i915_gem_context_put(ctx);
137 	return banned;
138 }
139 
140 static void mark_innocent(struct i915_request *rq)
141 {
142 	struct i915_gem_context *ctx;
143 
144 	rcu_read_lock();
145 	ctx = rcu_dereference(rq->context->gem_context);
146 	if (ctx)
147 		atomic_inc(&ctx->active_count);
148 	rcu_read_unlock();
149 }
150 
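/*
 * Summary of the code below: a guilty request has its fence error forced
 * to -EIO and is skipped; if its context ends up banned, the remaining
 * queued requests from that context are skipped as well (see
 * engine_skip_context()). An innocent request is flagged -EAGAIN and its
 * context's active_count is bumped so the hang is not held against it.
 */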
151 void __i915_request_reset(struct i915_request *rq, bool guilty)
152 {
153 	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
154 
155 	GEM_BUG_ON(i915_request_completed(rq));
156 
157 	rcu_read_lock(); /* protect the GEM context */
158 	if (guilty) {
159 		i915_request_set_error_once(rq, -EIO);
160 		__i915_request_skip(rq);
161 		if (mark_guilty(rq))
162 			engine_skip_context(rq);
163 	} else {
164 		i915_request_set_error_once(rq, -EAGAIN);
165 		mark_innocent(rq);
166 	}
167 	rcu_read_unlock();
168 }
169 
170 static bool i915_in_reset(struct pci_dev *pdev)
171 {
172 	u8 gdrst;
173 
174 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
175 	return gdrst & GRDOM_RESET_STATUS;
176 }
177 
178 static int i915_do_reset(struct intel_gt *gt,
179 			 intel_engine_mask_t engine_mask,
180 			 unsigned int retry)
181 {
182 	struct pci_dev *pdev = gt->i915->drm.pdev;
183 	int err;
184 
185 	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
186 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
187 	udelay(50);
188 	err = wait_for_atomic(i915_in_reset(pdev), 50);
189 
190 	/* Clear the reset request. */
191 	pci_write_config_byte(pdev, I915_GDRST, 0);
192 	udelay(50);
193 	if (!err)
194 		err = wait_for_atomic(!i915_in_reset(pdev), 50);
195 
196 	return err;
197 }
198 
199 static bool g4x_reset_complete(struct pci_dev *pdev)
200 {
201 	u8 gdrst;
202 
203 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
204 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
205 }
206 
207 static int g33_do_reset(struct intel_gt *gt,
208 			intel_engine_mask_t engine_mask,
209 			unsigned int retry)
210 {
211 	struct pci_dev *pdev = gt->i915->drm.pdev;
212 
213 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
214 	return wait_for_atomic(g4x_reset_complete(pdev), 50);
215 }
216 
217 static int g4x_do_reset(struct intel_gt *gt,
218 			intel_engine_mask_t engine_mask,
219 			unsigned int retry)
220 {
221 	struct pci_dev *pdev = gt->i915->drm.pdev;
222 	struct intel_uncore *uncore = gt->uncore;
223 	int ret;
224 
225 	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
226 	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
227 	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
228 
229 	pci_write_config_byte(pdev, I915_GDRST,
230 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
231 	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
232 	if (ret) {
233 		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
234 		goto out;
235 	}
236 
237 	pci_write_config_byte(pdev, I915_GDRST,
238 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
239 	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
240 	if (ret) {
241 		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
242 		goto out;
243 	}
244 
245 out:
246 	pci_write_config_byte(pdev, I915_GDRST, 0);
247 
248 	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
249 	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
250 
251 	return ret;
252 }
253 
254 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
255 			unsigned int retry)
256 {
257 	struct intel_uncore *uncore = gt->uncore;
258 	int ret;
259 
260 	intel_uncore_write_fw(uncore, ILK_GDSR,
261 			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
262 	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
263 					   ILK_GRDOM_RESET_ENABLE, 0,
264 					   5000, 0,
265 					   NULL);
266 	if (ret) {
267 		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
268 		goto out;
269 	}
270 
271 	intel_uncore_write_fw(uncore, ILK_GDSR,
272 			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
273 	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
274 					   ILK_GRDOM_RESET_ENABLE, 0,
275 					   5000, 0,
276 					   NULL);
277 	if (ret) {
278 		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
279 		goto out;
280 	}
281 
282 out:
283 	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
284 	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
285 	return ret;
286 }
287 
288 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
289 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
290 {
291 	struct intel_uncore *uncore = gt->uncore;
292 	int err;
293 
294 	/*
295 	 * GEN6_GDRST is not in the gt power well, no need to check
296 	 * for fifo space for the write or forcewake the chip for
297 	 * the read
298 	 */
299 	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
300 
301 	/* Wait for the device to ack the reset requests */
302 	err = __intel_wait_for_register_fw(uncore,
303 					   GEN6_GDRST, hw_domain_mask, 0,
304 					   500, 0,
305 					   NULL);
306 	if (err)
307 		drm_dbg(&gt->i915->drm,
308 			"Wait for 0x%08x engines reset failed\n",
309 			hw_domain_mask);
310 
311 	return err;
312 }
313 
314 static int gen6_reset_engines(struct intel_gt *gt,
315 			      intel_engine_mask_t engine_mask,
316 			      unsigned int retry)
317 {
318 	static const u32 hw_engine_mask[] = {
319 		[RCS0]  = GEN6_GRDOM_RENDER,
320 		[BCS0]  = GEN6_GRDOM_BLT,
321 		[VCS0]  = GEN6_GRDOM_MEDIA,
322 		[VCS1]  = GEN8_GRDOM_MEDIA2,
323 		[VECS0] = GEN6_GRDOM_VECS,
324 	};
325 	struct intel_engine_cs *engine;
326 	u32 hw_mask;
327 
328 	if (engine_mask == ALL_ENGINES) {
329 		hw_mask = GEN6_GRDOM_FULL;
330 	} else {
331 		intel_engine_mask_t tmp;
332 
333 		hw_mask = 0;
334 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
335 			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
336 			hw_mask |= hw_engine_mask[engine->id];
337 		}
338 	}
339 
340 	return gen6_hw_domain_reset(gt, hw_mask);
341 }
342 
343 static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
344 {
345 	struct intel_uncore *uncore = engine->uncore;
346 	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
347 	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
348 	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
349 	i915_reg_t sfc_usage;
350 	u32 sfc_usage_bit;
351 	u32 sfc_reset_bit;
352 	int ret;
353 
354 	switch (engine->class) {
355 	case VIDEO_DECODE_CLASS:
356 		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
357 			return 0;
358 
359 		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
360 		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
361 
362 		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
363 		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
364 
365 		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
366 		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
367 		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
368 		break;
369 
370 	case VIDEO_ENHANCEMENT_CLASS:
371 		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
372 		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
373 
374 		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
375 		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
376 
377 		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
378 		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
379 		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
380 		break;
381 
382 	default:
383 		return 0;
384 	}
385 
386 	/*
387 	 * If the engine is using an SFC, tell the engine that a software reset
388 	 * is going to happen. The engine will then try to force lock the SFC.
389 	 * If SFC ends up being locked to the engine we want to reset, we have
390 	 * to reset it as well (we will unlock it once the reset sequence is
391 	 * completed).
392 	 */
393 	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
394 		return 0;
395 
396 	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
397 
398 	ret = __intel_wait_for_register_fw(uncore,
399 					   sfc_forced_lock_ack,
400 					   sfc_forced_lock_ack_bit,
401 					   sfc_forced_lock_ack_bit,
402 					   1000, 0, NULL);
403 
404 	/* Was the SFC released while we were trying to lock it? */
405 	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
406 		return 0;
407 
408 	if (ret) {
409 		drm_dbg(&engine->i915->drm,
410 			"Wait for SFC forced lock ack failed\n");
411 		return ret;
412 	}
413 
414 	*hw_mask |= sfc_reset_bit;
415 	return 0;
416 }
417 
418 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
419 {
420 	struct intel_uncore *uncore = engine->uncore;
421 	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
422 	i915_reg_t sfc_forced_lock;
423 	u32 sfc_forced_lock_bit;
424 
425 	switch (engine->class) {
426 	case VIDEO_DECODE_CLASS:
427 		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
428 			return;
429 
430 		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
431 		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
432 		break;
433 
434 	case VIDEO_ENHANCEMENT_CLASS:
435 		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
436 		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
437 		break;
438 
439 	default:
440 		return;
441 	}
442 
443 	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
444 }
445 
446 static int gen11_reset_engines(struct intel_gt *gt,
447 			       intel_engine_mask_t engine_mask,
448 			       unsigned int retry)
449 {
450 	static const u32 hw_engine_mask[] = {
451 		[RCS0]  = GEN11_GRDOM_RENDER,
452 		[BCS0]  = GEN11_GRDOM_BLT,
453 		[VCS0]  = GEN11_GRDOM_MEDIA,
454 		[VCS1]  = GEN11_GRDOM_MEDIA2,
455 		[VCS2]  = GEN11_GRDOM_MEDIA3,
456 		[VCS3]  = GEN11_GRDOM_MEDIA4,
457 		[VECS0] = GEN11_GRDOM_VECS,
458 		[VECS1] = GEN11_GRDOM_VECS2,
459 	};
460 	struct intel_engine_cs *engine;
461 	intel_engine_mask_t tmp;
462 	u32 hw_mask;
463 	int ret;
464 
465 	if (engine_mask == ALL_ENGINES) {
466 		hw_mask = GEN11_GRDOM_FULL;
467 	} else {
468 		hw_mask = 0;
469 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
470 			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
471 			hw_mask |= hw_engine_mask[engine->id];
472 			ret = gen11_lock_sfc(engine, &hw_mask);
473 			if (ret)
474 				goto sfc_unlock;
475 		}
476 	}
477 
478 	ret = gen6_hw_domain_reset(gt, hw_mask);
479 
480 sfc_unlock:
481 	/*
482 	 * We unlock the SFC based on the lock status and not the result of
483 	 * gen11_lock_sfc to make sure that we clean up properly if something
484 	 * went wrong during the lock (e.g. the lock was acquired after the timeout
485 	 * expiration).
486 	 */
487 	if (engine_mask != ALL_ENGINES)
488 		for_each_engine_masked(engine, gt, engine_mask, tmp)
489 			gen11_unlock_sfc(engine);
490 
491 	return ret;
492 }
493 
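/*
 * Note: RING_RESET_CTL is a "masked" register - the upper 16 bits of a
 * write select which of the lower 16 bits take effect, which is why the
 * code below uses _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE().
 */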
494 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
495 {
496 	struct intel_uncore *uncore = engine->uncore;
497 	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
498 	u32 request, mask, ack;
499 	int ret;
500 
501 	ack = intel_uncore_read_fw(uncore, reg);
502 	if (ack & RESET_CTL_CAT_ERROR) {
503 		/*
504 		 * For catastrophic errors, the ready-for-reset sequence
505 		 * needs to be bypassed: HAS#396813
506 		 */
507 		request = RESET_CTL_CAT_ERROR;
508 		mask = RESET_CTL_CAT_ERROR;
509 
510 		/* Catastrophic errors need to be cleared by HW */
511 		ack = 0;
512 	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
513 		request = RESET_CTL_REQUEST_RESET;
514 		mask = RESET_CTL_READY_TO_RESET;
515 		ack = RESET_CTL_READY_TO_RESET;
516 	} else {
517 		return 0;
518 	}
519 
520 	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
521 	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
522 					   700, 0, NULL);
523 	if (ret)
524 		drm_err(&engine->i915->drm,
525 			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
526 			engine->name, request,
527 			intel_uncore_read_fw(uncore, reg));
528 
529 	return ret;
530 }
531 
532 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
533 {
534 	intel_uncore_write_fw(engine->uncore,
535 			      RING_RESET_CTL(engine->mmio_base),
536 			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
537 }
538 
539 static int gen8_reset_engines(struct intel_gt *gt,
540 			      intel_engine_mask_t engine_mask,
541 			      unsigned int retry)
542 {
543 	struct intel_engine_cs *engine;
544 	const bool reset_non_ready = retry >= 1;
545 	intel_engine_mask_t tmp;
546 	int ret;
547 
548 	for_each_engine_masked(engine, gt, engine_mask, tmp) {
549 		ret = gen8_engine_reset_prepare(engine);
550 		if (ret && !reset_non_ready)
551 			goto skip_reset;
552 
553 		/*
554 		 * If this is not the first failed attempt to prepare,
555 		 * we decide to proceed anyway.
556 		 *
557 		 * By doing so we risk context corruption and with
558 		 * some gens (kbl), possible system hang if reset
559 		 * happens during active bb execution.
560 		 *
561 		 * We would rather risk context corruption than a
562 		 * failed reset with a wedged driver/gpu. And the
563 		 * active bb execution case should be covered by the
564 		 * stop_engines() we perform before the reset.
565 		 */
566 	}
567 
568 	if (INTEL_GEN(gt->i915) >= 11)
569 		ret = gen11_reset_engines(gt, engine_mask, retry);
570 	else
571 		ret = gen6_reset_engines(gt, engine_mask, retry);
572 
573 skip_reset:
574 	for_each_engine_masked(engine, gt, engine_mask, tmp)
575 		gen8_engine_reset_cancel(engine);
576 
577 	return ret;
578 }
579 
580 static int mock_reset(struct intel_gt *gt,
581 		      intel_engine_mask_t mask,
582 		      unsigned int retry)
583 {
584 	return 0;
585 }
586 
587 typedef int (*reset_func)(struct intel_gt *,
588 			  intel_engine_mask_t engine_mask,
589 			  unsigned int retry);
590 
591 static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
592 {
593 	struct drm_i915_private *i915 = gt->i915;
594 
595 	if (is_mock_gt(gt))
596 		return mock_reset;
597 	else if (INTEL_GEN(i915) >= 8)
598 		return gen8_reset_engines;
599 	else if (INTEL_GEN(i915) >= 6)
600 		return gen6_reset_engines;
601 	else if (INTEL_GEN(i915) >= 5)
602 		return ilk_do_reset;
603 	else if (IS_G4X(i915))
604 		return g4x_do_reset;
605 	else if (IS_G33(i915) || IS_PINEVIEW(i915))
606 		return g33_do_reset;
607 	else if (INTEL_GEN(i915) >= 3)
608 		return i915_do_reset;
609 	else
610 		return NULL;
611 }
612 
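/*
 * Perform the platform-specific hardware reset for the engines in
 * engine_mask, retrying up to RESET_MAX_RETRIES times when a full-GT
 * reset (ALL_ENGINES) is requested. Returns -ENODEV if the platform has
 * no usable reset method. A hedged usage sketch (hypothetical caller,
 * error handling elided):
 *
 *	if (__intel_gt_reset(gt, ALL_ENGINES))
 *		intel_gt_set_wedged(gt);
 */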
613 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
614 {
615 	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
616 	reset_func reset;
617 	int ret = -ETIMEDOUT;
618 	int retry;
619 
620 	reset = intel_get_gpu_reset(gt);
621 	if (!reset)
622 		return -ENODEV;
623 
624 	/*
625 	 * If the power well sleeps during the reset, the reset
626 	 * request may be dropped and never complete (causing -EIO).
627 	 */
628 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
629 	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
630 		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
631 		preempt_disable();
632 		ret = reset(gt, engine_mask, retry);
633 		preempt_enable();
634 	}
635 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
636 
637 	return ret;
638 }
639 
640 bool intel_has_gpu_reset(const struct intel_gt *gt)
641 {
642 	if (!gt->i915->params.reset)
643 		return false;
644 
645 	return intel_get_gpu_reset(gt);
646 }
647 
648 bool intel_has_reset_engine(const struct intel_gt *gt)
649 {
650 	if (gt->i915->params.reset < 2)
651 		return false;
652 
653 	return INTEL_INFO(gt->i915)->has_reset_engine;
654 }
655 
656 int intel_reset_guc(struct intel_gt *gt)
657 {
658 	u32 guc_domain =
659 		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
660 	int ret;
661 
662 	GEM_BUG_ON(!HAS_GT_UC(gt->i915));
663 
664 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
665 	ret = gen6_hw_domain_reset(gt, guc_domain);
666 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
667 
668 	return ret;
669 }
670 
671 /*
672  * Ensure the irq handler finishes, and is not run again, so that the
673  * engine is quiescent before we begin the reset sequence.
674  */
675 static void reset_prepare_engine(struct intel_engine_cs *engine)
676 {
677 	/*
678 	 * During the reset sequence, we must prevent the engine from
679 	 * entering RC6. As the context state is undefined until we restart
680 	 * the engine, if it does enter RC6 during the reset, the state
681 	 * written to the powercontext is undefined and so we may lose
682 	 * GPU state upon resume, i.e. fail to restart after a reset.
683 	 */
684 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
685 	if (engine->reset.prepare)
686 		engine->reset.prepare(engine);
687 }
688 
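/*
 * Presumed intent, from the callers below: zap userspace mmaps of
 * fence-backed GGTT objects before the reset, so that any later access
 * refaults and picks up the fence state restored by gt_reset().
 */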
689 static void revoke_mmaps(struct intel_gt *gt)
690 {
691 	int i;
692 
693 	for (i = 0; i < gt->ggtt->num_fences; i++) {
694 		struct drm_vma_offset_node *node;
695 		struct i915_vma *vma;
696 		u64 vma_offset;
697 
698 		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
699 		if (!vma)
700 			continue;
701 
702 		if (!i915_vma_has_userfault(vma))
703 			continue;
704 
705 		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
706 
707 		if (!vma->mmo)
708 			continue;
709 
710 		node = &vma->mmo->vma_node;
711 		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
712 
713 #ifdef __linux__
714 		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
715 				    drm_vma_node_offset_addr(node) + vma_offset,
716 				    vma->size,
717 				    1);
718 #else
719 {
720 		struct drm_i915_private *dev_priv = vma->obj->base.dev->dev_private;
721 		struct vm_page *pg;
722 
723 		for (pg = &dev_priv->pgs[atop(vma->node.start)];
724 		     pg != &dev_priv->pgs[atop(vma->node.start + vma->size)];
725 		     pg++)
726 			pmap_page_protect(pg, PROT_NONE);
727 }
728 #endif
729 	}
730 }
731 
732 static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
733 {
734 	struct intel_engine_cs *engine;
735 	intel_engine_mask_t awake = 0;
736 	enum intel_engine_id id;
737 
738 	for_each_engine(engine, gt, id) {
739 		if (intel_engine_pm_get_if_awake(engine))
740 			awake |= engine->mask;
741 		reset_prepare_engine(engine);
742 	}
743 
744 	intel_uc_reset_prepare(&gt->uc);
745 
746 	return awake;
747 }
748 
749 static void gt_revoke(struct intel_gt *gt)
750 {
751 	revoke_mmaps(gt);
752 }
753 
754 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
755 {
756 	struct intel_engine_cs *engine;
757 	enum intel_engine_id id;
758 	int err;
759 
760 	/*
761 	 * Everything depends on having the GTT running, so we need to start
762 	 * there.
763 	 */
764 	err = i915_ggtt_enable_hw(gt->i915);
765 	if (err)
766 		return err;
767 
768 	for_each_engine(engine, gt, id)
769 		__intel_engine_reset(engine, stalled_mask & engine->mask);
770 
771 	intel_ggtt_restore_fences(gt->ggtt);
772 
773 	return err;
774 }
775 
776 static void reset_finish_engine(struct intel_engine_cs *engine)
777 {
778 	if (engine->reset.finish)
779 		engine->reset.finish(engine);
780 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
781 
782 	intel_engine_signal_breadcrumbs(engine);
783 }
784 
785 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
786 {
787 	struct intel_engine_cs *engine;
788 	enum intel_engine_id id;
789 
790 	for_each_engine(engine, gt, id) {
791 		reset_finish_engine(engine);
792 		if (awake & engine->mask)
793 			intel_engine_pm_put(engine);
794 	}
795 }
796 
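/*
 * While the GT is wedged, engine->submit_request points at this stub (see
 * __intel_gt_set_wedged() below): every new request is flagged -EIO and
 * completed immediately instead of ever reaching the hardware.
 */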
797 static void nop_submit_request(struct i915_request *request)
798 {
799 	struct intel_engine_cs *engine = request->engine;
800 	unsigned long flags;
801 
802 	RQ_TRACE(request, "-EIO\n");
803 	i915_request_set_error_once(request, -EIO);
804 
805 	spin_lock_irqsave(&engine->active.lock, flags);
806 	__i915_request_submit(request);
807 	i915_request_mark_complete(request);
808 	spin_unlock_irqrestore(&engine->active.lock, flags);
809 
810 	intel_engine_signal_breadcrumbs(engine);
811 }
812 
813 static void __intel_gt_set_wedged(struct intel_gt *gt)
814 {
815 	struct intel_engine_cs *engine;
816 	intel_engine_mask_t awake;
817 	enum intel_engine_id id;
818 
819 	if (test_bit(I915_WEDGED, &gt->reset.flags))
820 		return;
821 
822 	GT_TRACE(gt, "start\n");
823 
824 	/*
825 	 * First, stop submission to hw, but do not yet complete requests by
826 	 * rolling the global seqno forward (since this would complete requests
827 	 * for which we haven't set the fence error to EIO yet).
828 	 */
829 	awake = reset_prepare(gt);
830 
831 	/* Even if the GPU reset fails, it should still stop the engines */
832 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
833 		__intel_gt_reset(gt, ALL_ENGINES);
834 
835 	for_each_engine(engine, gt, id)
836 		engine->submit_request = nop_submit_request;
837 
838 	/*
839 	 * Make sure no request can slip through without getting completed by
840 	 * either the nop_submit_request installed above or the
841 	 * engine->reset.cancel() calls below.
842 	 */
843 	synchronize_rcu_expedited();
844 	set_bit(I915_WEDGED, &gt->reset.flags);
845 
846 	/* Mark all executing requests as skipped */
847 	for_each_engine(engine, gt, id)
848 		if (engine->reset.cancel)
849 			engine->reset.cancel(engine);
850 
851 	reset_finish(gt, awake);
852 
853 	GT_TRACE(gt, "end\n");
854 }
855 
856 void intel_gt_set_wedged(struct intel_gt *gt)
857 {
858 	intel_wakeref_t wakeref;
859 
860 	if (test_bit(I915_WEDGED, &gt->reset.flags))
861 		return;
862 
863 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
864 	mutex_lock(&gt->reset.mutex);
865 
866 	if (GEM_SHOW_DEBUG()) {
867 		struct drm_printer p = drm_debug_printer(__func__);
868 		struct intel_engine_cs *engine;
869 		enum intel_engine_id id;
870 
871 		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
872 		for_each_engine(engine, gt, id) {
873 			if (intel_engine_is_idle(engine))
874 				continue;
875 
876 			intel_engine_dump(engine, &p, "%s\n", engine->name);
877 		}
878 	}
879 
880 	__intel_gt_set_wedged(gt);
881 
882 	mutex_unlock(&gt->reset.mutex);
883 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
884 }
885 
886 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
887 {
888 	struct intel_gt_timelines *timelines = &gt->timelines;
889 	struct intel_timeline *tl;
890 	bool ok;
891 
892 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
893 		return true;
894 
895 	/* Never fully initialised, recovery impossible */
896 	if (intel_gt_has_unrecoverable_error(gt))
897 		return false;
898 
899 	GT_TRACE(gt, "start\n");
900 
901 	/*
902 	 * Before unwedging, make sure that all pending operations
903 	 * are flushed and errored out - we may have requests waiting upon
904 	 * third party fences. We marked all inflight requests as EIO, and
905 	 * every execbuf since has returned EIO; for consistency we want all
906 	 * the currently pending requests to also be marked as EIO, which
907 	 * is done inside our nop_submit_request - and so we must wait.
908 	 *
909 	 * No more can be submitted until we reset the wedged bit.
910 	 */
911 	spin_lock(&timelines->lock);
912 	list_for_each_entry(tl, &timelines->active_list, link) {
913 		struct dma_fence *fence;
914 
915 		fence = i915_active_fence_get(&tl->last_request);
916 		if (!fence)
917 			continue;
918 
919 		spin_unlock(&timelines->lock);
920 
921 		/*
922 		 * All internal dependencies (i915_requests) will have
923 		 * been flushed by the set-wedge, but we may be stuck waiting
924 		 * for external fences. These should all be capped to 10s
925 		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
926 		 * in the worst case.
927 		 */
928 		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
929 		dma_fence_put(fence);
930 
931 		/* Restart iteration after dropping the lock */
932 		spin_lock(&timelines->lock);
933 		tl = list_entry(&timelines->active_list, typeof(*tl), link);
934 	}
935 	spin_unlock(&timelines->lock);
936 
937 	/* We must reset pending GPU events before restoring our submission */
938 	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
939 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
940 		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
941 	if (!ok) {
942 		/*
943 		 * Warn CI about the unrecoverable wedged condition.
944 		 * Time for a reboot.
945 		 */
946 		add_taint_for_CI(gt->i915, TAINT_WARN);
947 		return false;
948 	}
949 
950 	/*
951 	 * Undo nop_submit_request. We prevent all new i915 requests from
952 	 * being queued (by disallowing execbuf whilst wedged) so having
953 	 * waited for all active requests above, we know the system is idle
954 	 * and do not have to worry about a thread being inside
955 	 * engine->submit_request() as we swap over. So unlike installing
956 	 * the nop_submit_request on reset, we can do this from normal
957 	 * context and do not require stop_machine().
958 	 */
959 	intel_engines_reset_default_submission(gt);
960 
961 	GT_TRACE(gt, "end\n");
962 
963 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
964 	clear_bit(I915_WEDGED, &gt->reset.flags);
965 
966 	return true;
967 }
968 
969 bool intel_gt_unset_wedged(struct intel_gt *gt)
970 {
971 	bool result;
972 
973 	mutex_lock(&gt->reset.mutex);
974 	result = __intel_gt_unset_wedged(gt);
975 	mutex_unlock(&gt->reset.mutex);
976 
977 	return result;
978 }
979 
980 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
981 {
982 	int err, i;
983 
984 	gt_revoke(gt);
985 
986 	err = __intel_gt_reset(gt, ALL_ENGINES);
987 	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
988 		drm_msleep(10 * (i + 1));
989 		err = __intel_gt_reset(gt, ALL_ENGINES);
990 	}
991 	if (err)
992 		return err;
993 
994 	return gt_reset(gt, stalled_mask);
995 }
996 
997 static int resume(struct intel_gt *gt)
998 {
999 	struct intel_engine_cs *engine;
1000 	enum intel_engine_id id;
1001 	int ret;
1002 
1003 	for_each_engine(engine, gt, id) {
1004 		ret = intel_engine_resume(engine);
1005 		if (ret)
1006 			return ret;
1007 	}
1008 
1009 	return 0;
1010 }
1011 
1012 /**
1013  * intel_gt_reset - reset chip after a hang
1014  * @gt: #intel_gt to reset
1015  * @stalled_mask: mask of the stalled engines with the guilty requests
1016  * @reason: user error message for why we are resetting
1017  *
1018  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
1019  * on failure.
1020  *
1021  * Procedure is fairly simple:
1022  *   - reset the chip using the reset reg
1023  *   - re-init context state
1024  *   - re-init hardware status page
1025  *   - re-init ring buffer
1026  *   - re-init interrupt state
1027  *   - re-init display
1028  */
1029 void intel_gt_reset(struct intel_gt *gt,
1030 		    intel_engine_mask_t stalled_mask,
1031 		    const char *reason)
1032 {
1033 	intel_engine_mask_t awake;
1034 	int ret;
1035 
1036 	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
1037 
1038 	might_sleep();
1039 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1040 	mutex_lock(&gt->reset.mutex);
1041 
1042 	/* Clear any previous failed attempts at recovery. Time to try again. */
1043 	if (!__intel_gt_unset_wedged(gt))
1044 		goto unlock;
1045 
1046 	if (reason)
1047 		drm_notice(&gt->i915->drm,
1048 			   "Resetting chip for %s\n", reason);
1049 	atomic_inc(&gt->i915->gpu_error.reset_count);
1050 
1051 	awake = reset_prepare(gt);
1052 
1053 	if (!intel_has_gpu_reset(gt)) {
1054 		if (gt->i915->params.reset)
1055 			drm_err(&gt->i915->drm, "GPU reset not supported\n");
1056 		else
1057 			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
1058 		goto error;
1059 	}
1060 
1061 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1062 		intel_runtime_pm_disable_interrupts(gt->i915);
1063 
1064 	if (do_reset(gt, stalled_mask)) {
1065 		drm_err(&gt->i915->drm, "Failed to reset chip\n");
1066 		goto taint;
1067 	}
1068 
1069 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1070 		intel_runtime_pm_enable_interrupts(gt->i915);
1071 
1072 	intel_overlay_reset(gt->i915);
1073 
1074 	/*
1075 	 * Next we need to restore the context, but we don't use those
1076 	 * yet either...
1077 	 *
1078 	 * Ring buffer needs to be re-initialized in the KMS case, or if X
1079 	 * was running at the time of the reset (i.e. we weren't VT
1080 	 * switched away).
1081 	 */
1082 	ret = intel_gt_init_hw(gt);
1083 	if (ret) {
1084 		drm_err(&gt->i915->drm,
1085 			"Failed to initialise HW following reset (%d)\n",
1086 			ret);
1087 		goto taint;
1088 	}
1089 
1090 	ret = resume(gt);
1091 	if (ret)
1092 		goto taint;
1093 
1094 finish:
1095 	reset_finish(gt, awake);
1096 unlock:
1097 	mutex_unlock(&gt->reset.mutex);
1098 	return;
1099 
1100 taint:
1101 	/*
1102 	 * History tells us that if we cannot reset the GPU now, we
1103 	 * never will. This then impacts everything that is run
1104 	 * subsequently. On failing the reset, we mark the driver
1105 	 * as wedged, preventing further execution on the GPU.
1106 	 * We also want to go one step further and add a taint to the
1107 	 * kernel so that any subsequent faults can be traced back to
1108 	 * this failure. This is important for CI, where if the
1109 	 * GPU/driver fails we would like to reboot and restart testing
1110 	 * rather than continue on into oblivion. For everyone else,
1111 	 * the system should still plod along, but they have been warned!
1112 	 */
1113 	add_taint_for_CI(gt->i915, TAINT_WARN);
1114 error:
1115 	__intel_gt_set_wedged(gt);
1116 	goto finish;
1117 }
1118 
1119 static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
1120 {
1121 	return __intel_gt_reset(engine->gt, engine->mask);
1122 }
1123 
1124 /**
1125  * intel_engine_reset - reset GPU engine to recover from a hang
1126  * @engine: engine to reset
1127  * @msg: reason for GPU reset; or NULL for no drm_notice()
1128  *
1129  * Reset a specific GPU engine. Useful if a hang is detected.
1130  * Returns zero on successful reset or otherwise an error code.
1131  *
1132  * Procedure is:
1133  *  - identify the request that caused the hang and drop it
1134  *  - reset engine (which will force the engine to idle)
1135  *  - re-init/configure engine
1136  */
1137 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1138 {
1139 	struct intel_gt *gt = engine->gt;
1140 	bool uses_guc = intel_engine_in_guc_submission_mode(engine);
1141 	int ret;
1142 
1143 	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
1144 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1145 
1146 	if (!intel_engine_pm_get_if_awake(engine))
1147 		return 0;
1148 
1149 	reset_prepare_engine(engine);
1150 
1151 	if (msg)
1152 		drm_notice(&engine->i915->drm,
1153 			   "Resetting %s for %s\n", engine->name, msg);
1154 	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
1155 
1156 	if (!uses_guc)
1157 		ret = intel_gt_reset_engine(engine);
1158 	else
1159 		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
1160 	if (ret) {
1161 		/* If we fail here, we expect to fall back to a global reset */
1162 		drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
1163 			uses_guc ? "GuC " : "", engine->name, ret);
1164 		goto out;
1165 	}
1166 
1167 	/*
1168 	 * The request that caused the hang is stuck on elsp; we know the
1169 	 * active request and can drop it, then adjust the ring head to skip
1170 	 * the offending request and resume executing the rest of the queue.
1171 	 */
1172 	__intel_engine_reset(engine, true);
1173 
1174 	/*
1175 	 * The engine and its registers (and workarounds in case of render)
1176 	 * have been reset to their default values. Follow the init_ring
1177 	 * process to program RING_MODE, HWSP and re-enable submission.
1178 	 */
1179 	ret = intel_engine_resume(engine);
1180 
1181 out:
1182 	intel_engine_cancel_stop_cs(engine);
1183 	reset_finish_engine(engine);
1184 	intel_engine_pm_put_async(engine);
1185 	return ret;
1186 }
1187 
1188 static void intel_gt_reset_global(struct intel_gt *gt,
1189 				  u32 engine_mask,
1190 				  const char *reason)
1191 {
1192 #ifdef notyet
1193 	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1194 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1195 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1196 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1197 #endif
1198 	struct intel_wedge_me w;
1199 
1200 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1201 
1202 	drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
1203 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1204 
1205 	/* Use a watchdog to ensure that our reset completes */
1206 	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
1207 		intel_prepare_reset(gt->i915);
1208 
1209 		/* Flush everyone using a resource about to be clobbered */
1210 		synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1211 
1212 		intel_gt_reset(gt, engine_mask, reason);
1213 
1214 		intel_finish_reset(gt->i915);
1215 	}
1216 
1217 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
1218 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1219 }
1220 
1221 /**
1222  * intel_gt_handle_error - handle a gpu error
1223  * @gt: the intel_gt
1224  * @engine_mask: mask representing engines that are hung
1225  * @flags: control flags
1226  * @fmt: Error message format string
1227  *
1228  * Do some basic checking of register state at error time and
1229  * dump it to the syslog.  Also call i915_capture_error_state() to make
1230  * sure we get a record and make it available in debugfs.  Fire a uevent
1231  * so userspace knows something bad happened (should trigger collection
1232  * of a ring dump etc.).
1233  */
1234 void intel_gt_handle_error(struct intel_gt *gt,
1235 			   intel_engine_mask_t engine_mask,
1236 			   unsigned long flags,
1237 			   const char *fmt, ...)
1238 {
1239 	struct intel_engine_cs *engine;
1240 	intel_wakeref_t wakeref;
1241 	intel_engine_mask_t tmp;
1242 	char error_msg[80];
1243 	char *msg = NULL;
1244 
1245 	if (fmt) {
1246 		va_list args;
1247 
1248 		va_start(args, fmt);
1249 		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1250 		va_end(args);
1251 
1252 		msg = error_msg;
1253 	}
1254 
1255 	/*
1256 	 * In most cases it's guaranteed that we get here with an RPM
1257 	 * reference held, for example because there is a pending GPU
1258 	 * request that won't finish until the reset is done. This
1259 	 * isn't the case at least when we get here by doing a
1260 	 * simulated reset via debugfs, so get an RPM reference.
1261 	 */
1262 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1263 
1264 	engine_mask &= gt->info.engine_mask;
1265 
1266 	if (flags & I915_ERROR_CAPTURE) {
1267 		i915_capture_error_state(gt->i915);
1268 		intel_gt_clear_error_registers(gt, engine_mask);
1269 	}
1270 
1271 	/*
1272 	 * Try engine reset when available. We fall back to full reset if
1273 	 * single reset fails.
1274 	 */
1275 	if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1276 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
1277 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1278 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1279 					     &gt->reset.flags))
1280 				continue;
1281 
1282 			if (intel_engine_reset(engine, msg) == 0)
1283 				engine_mask &= ~engine->mask;
1284 
1285 			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
1286 					      &gt->reset.flags);
1287 		}
1288 	}
1289 
1290 	if (!engine_mask)
1291 		goto out;
1292 
1293 	/* Full reset needs the mutex, stop any other user trying to do so. */
1294 	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1295 		wait_event(gt->reset.queue,
1296 			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1297 		goto out; /* piggy-back on the other reset */
1298 	}
1299 
1300 	/* Make sure intel_gt_reset_trylock() sees the I915_RESET_BACKOFF */
1301 	synchronize_rcu_expedited();
1302 
1303 	/* Prevent any other reset-engine attempt. */
1304 	for_each_engine(engine, gt, tmp) {
1305 		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1306 					&gt->reset.flags))
1307 			wait_on_bit(&gt->reset.flags,
1308 				    I915_RESET_ENGINE + engine->id,
1309 				    TASK_UNINTERRUPTIBLE);
1310 	}
1311 
1312 	intel_gt_reset_global(gt, engine_mask, msg);
1313 
1314 	for_each_engine(engine, gt, tmp)
1315 		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
1316 				 &gt->reset.flags);
1317 	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1318 	smp_mb__after_atomic();
1319 	wake_up_all(&gt->reset.queue);
1320 
1321 out:
1322 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1323 }
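
/*
 * Hedged sketch of how a hang detector might invoke intel_gt_handle_error()
 * above; the engine pointer is an assumption of the example, not part of
 * this file:
 *
 *	intel_gt_handle_error(engine->gt, engine->mask, I915_ERROR_CAPTURE,
 *			      "%s hung", engine->name);
 */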
1324 
1325 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1326 {
1327 	might_lock(&gt->reset.backoff_srcu);
1328 	might_sleep();
1329 
1330 	rcu_read_lock();
1331 	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1332 		rcu_read_unlock();
1333 
1334 		if (wait_event_interruptible(gt->reset.queue,
1335 					     !test_bit(I915_RESET_BACKOFF,
1336 						       &gt->reset.flags)))
1337 			return -EINTR;
1338 
1339 		rcu_read_lock();
1340 	}
1341 	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1342 	rcu_read_unlock();
1343 
1344 	return 0;
1345 }
1346 
1347 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
1348 __releases(&gt->reset.backoff_srcu)
1349 {
1350 	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1351 }
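
/*
 * Sketch of the intended pairing of intel_gt_reset_trylock() and
 * intel_gt_reset_unlock() above (not an actual caller in this file): hold
 * the backoff SRCU read lock around work that must not overlap a reset.
 *
 *	int tag, err;
 *
 *	err = intel_gt_reset_trylock(gt, &tag);
 *	if (err)
 *		return err;
 *	... touch the hardware ...
 *	intel_gt_reset_unlock(gt, tag);
 */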
1352 
1353 int intel_gt_terminally_wedged(struct intel_gt *gt)
1354 {
1355 	might_sleep();
1356 
1357 	if (!intel_gt_is_wedged(gt))
1358 		return 0;
1359 
1360 	if (intel_gt_has_unrecoverable_error(gt))
1361 		return -EIO;
1362 
1363 	/* Reset still in progress? Maybe we will recover? */
1364 	if (wait_event_interruptible(gt->reset.queue,
1365 				     !test_bit(I915_RESET_BACKOFF,
1366 					       &gt->reset.flags)))
1367 		return -EINTR;
1368 
1369 	return intel_gt_is_wedged(gt) ? -EIO : 0;
1370 }
1371 
1372 void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1373 {
1374 	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1375 		     I915_WEDGED_ON_INIT);
1376 	intel_gt_set_wedged(gt);
1377 	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1378 
1379 	/* Wedged on init is non-recoverable */
1380 	add_taint_for_CI(gt->i915, TAINT_WARN);
1381 }
1382 
1383 void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
1384 {
1385 	intel_gt_set_wedged(gt);
1386 	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
1387 }
1388 
1389 void intel_gt_init_reset(struct intel_gt *gt)
1390 {
1391 	init_waitqueue_head(&gt->reset.queue);
1392 	rw_init(&gt->reset.mutex, "gtres");
1393 	init_srcu_struct(&gt->reset.backoff_srcu);
1394 
1395 	/* no GPU until we are ready! */
1396 	__set_bit(I915_WEDGED, &gt->reset.flags);
1397 }
1398 
1399 void intel_gt_fini_reset(struct intel_gt *gt)
1400 {
1401 	cleanup_srcu_struct(&gt->reset.backoff_srcu);
1402 }
1403 
1404 static void intel_wedge_me(struct work_struct *work)
1405 {
1406 	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1407 
1408 	drm_err(&w->gt->i915->drm,
1409 		"%s timed out, cancelling all in-flight rendering.\n",
1410 		w->name);
1411 	intel_gt_set_wedged(w->gt);
1412 }
1413 
1414 void __intel_init_wedge(struct intel_wedge_me *w,
1415 			struct intel_gt *gt,
1416 			long timeout,
1417 			const char *name)
1418 {
1419 	w->gt = gt;
1420 	w->name = name;
1421 
1422 	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1423 	schedule_delayed_work(&w->work, timeout);
1424 }
1425 
1426 void __intel_fini_wedge(struct intel_wedge_me *w)
1427 {
1428 	cancel_delayed_work_sync(&w->work);
1429 	destroy_delayed_work_on_stack(&w->work);
1430 	w->gt = NULL;
1431 }
1432 
1433 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1434 #include "selftest_reset.c"
1435 #include "selftest_hangcheck.c"
1436 #endif
1437