/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

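/*
 * Error capture runs while the GPU is wedged, so every allocation here is
 * best-effort: both masks suppress allocation-failure warnings so that an
 * allocation can simply fail (and truncate the capture) under memory
 * pressure. ATOMIC_MAYFAIL is for the parts of the capture that run in
 * atomic context.
 */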
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

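/*
 * The formatted error text accumulates in kmalloc'd buffers tracked by a
 * chained scatterlist. Note that sg->dma_address is repurposed to record
 * the byte offset of each buffer within the overall stream, which lets
 * i915_gpu_coredump_copy_to_buffer() seek without re-walking every entry.
 * When the current buffer fills up, it is closed out here and, if the
 * current table of entries is full, a fresh page of entries is chained on.
 */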
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

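/*
 * Format in two passes: a sizing vsnprintf() against a NULL buffer first,
 * so that __i915_error_grow() can guarantee room (plus a trailing NUL)
 * before the real vscnprintf() writes into the stream. Any error is
 * latched in e->err and turns all subsequent printing into a no-op.
 */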
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct folio_batch *fbatch)
{
	folio_batch_release(fbatch);
}

static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
	while (folio_batch_space(fbatch)) {
		struct folio *folio;

		folio = folio_alloc(gfp, 0);
		if (!folio)
			return -ENOMEM;

		folio_batch_add(fbatch, folio);
	}

	return 0;
}

static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
{
	int err;

	folio_batch_init(fbatch);

	err = pool_refill(fbatch, gfp);
	if (err)
		pool_fini(fbatch);

	return err;
}

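/*
 * Prefer a freshly allocated page and dip into the pre-filled stash only
 * when the allocator fails, keeping the reserve intact for as long as
 * possible; pool_free() tops the stash back up before releasing pages.
 */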
static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
	struct folio *folio;

	folio = folio_alloc(gfp, 0);
	if (!folio && folio_batch_count(fbatch))
		folio = fbatch->folios[--fbatch->nr];

	return folio ? folio_address(folio) : NULL;
}

static void pool_free(struct folio_batch *fbatch, void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (folio_batch_space(fbatch))
		folio_batch_add(fbatch, folio);
	else
		folio_put(folio);
}

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct folio_batch pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

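/*
 * Deflate one source page into the coredump, chaining PAGE_SIZE output
 * buffers onto dst->page_list as zlib consumes the input. Reads from
 * write-combined memory are slow, so the source is staged through c->tmp
 * with the accelerated i915_memcpy_from_wc() whenever that is available.
 */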
static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct folio_batch pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
	err_printf(m, "  context timeline seqno %u\n", ctx->hwsp_seqno);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_cs *engine,
			       const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_display_device_info_print(&error->display_device_info,
					&error->display_runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
	intel_display_params_dump(m->i915, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}

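/*
 * Tear down the chained scatterlist: the payload buffers hanging off each
 * entry were kmalloc'd, while each table of entries is itself a single
 * page (see __i915_error_grow()), hence the kfree()/free_page() split.
 */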
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (gt->uc && gt->uc->guc.is_guc_capture) {
			if (ee->guc_capture_node)
				intel_guc_capture_print_engine_node(m, ee);
			else
				err_printf(m, "  Missing GuC capture node for %s\n",
					   ee->engine->name);
		} else {
			error_print_engine(m, ee);
		}

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * The GuC dumps the global, eng-class and eng-instance
		 * registers together as part of the engine state dump, so we
		 * print those in err_print_gt_engines instead.
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

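/*
 * Render the error state into the scatterlist exactly once: concurrent
 * callers race on error->sgl with cmpxchg(), and a loser simply frees its
 * duplicate rendering.
 */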
static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

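/*
 * Copy a byte range of the rendered error state into a caller buffer.
 * Reads are mostly sequential, so the last scatterlist entry touched is
 * cached in error->fit and a subsequent read resumes from there when its
 * offset allows, rather than walking the chain from the start; the byte
 * offset of each entry lives in sg->dma_address (see __sg_set_buf()).
 */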
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
	intel_display_params_free(&error->display_params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

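/*
 * Snapshot the contents of a vma into a freshly allocated coredump,
 * reading through whichever path the backing store allows: page by page
 * via the reserved error-capture slot in the GGTT, via the iomap of local
 * memory, or by kmapping system pages directly (bracketed by clflushes).
 */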
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 i915_gem_get_pat_index(gt->i915,
										I915_CACHE_NONE),
							 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     i915_gem_get_pat_index(gt->i915,
									    I915_CACHE_NONE),
						     0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap_local_page(page);
			ret = compress_page(compress, s, dst, false);
			kunmap_local(s);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

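/* Snapshot the engine's MMIO state, gated on what each generation provides. */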
static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
			ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
							      XEHP_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

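/*
 * The request's GEM context may already be on its way out, so it is only
 * inspected if a reference can still be obtained (kref_get_unless_zero()
 * under RCU). Returns true if error capture should be suppressed, i.e.
 * the context is gone or asked for no error capture (a "simulated" hang).
 */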
static bool record_context(struct i915_gem_context_coredump *e,
			   struct intel_context *ce)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(ce->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
	e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
				*ce->timeline->hwsp_seqno : ~0U;

	e->total_runtime = intel_context_get_total_runtime_ns(ce);
	e->avg_runtime = intel_context_get_avg_runtime_ns(ce);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};

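/*
 * Hold and reference a vma resource so that its backing pages stay
 * resident until the engine coredump has been written out. Capture is
 * best-effort: if the allocation or the hold fails, the vma is silently
 * skipped rather than failing the whole dump.
 */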
static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}

static struct intel_engine_capture_vma *
engine_coredump_add_context(struct intel_engine_coredump *ee,
			    struct intel_context *ce,
			    gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, ce);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
	vma = capture_vma(vma, ce->state, "HW context", gfp);

	return vma;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma;

	vma = engine_coredump_add_context(ee, rq->context, gfp);
	if (!vma)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}

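/*
 * Capture everything relating to a single engine: its registers (unless
 * the GuC already captured them), the hung request and context, and their
 * associated buffers. ATOMIC_MAYFAIL is used for the request/context step
 * since it may run in atomic context; see the pool refill in
 * gt_record_engines() below.
 */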
1628 static struct intel_engine_coredump *
1629 capture_engine(struct intel_engine_cs *engine,
1630 	       struct i915_vma_compress *compress,
1631 	       u32 dump_flags)
1632 {
1633 	struct intel_engine_capture_vma *capture = NULL;
1634 	struct intel_engine_coredump *ee;
1635 	struct intel_context *ce = NULL;
1636 	struct i915_request *rq = NULL;
1637 
1638 	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
1639 	if (!ee)
1640 		return NULL;
1641 
1642 	intel_engine_get_hung_entity(engine, &ce, &rq);
1643 	if (rq && !i915_request_started(rq))
1644 		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
1645 			 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
1646 
1647 	if (rq) {
1648 		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
1649 		i915_request_put(rq);
1650 	} else if (ce) {
1651 		capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
1652 	}
1653 
1654 	if (capture) {
1655 		intel_engine_coredump_add_vma(ee, capture, compress);
1656 
1657 		if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
1658 			intel_guc_capture_get_matching_node(engine->gt, ee, ce);
1659 	} else {
1660 		kfree(ee);
1661 		ee = NULL;
1662 	}
1663 
1664 	return ee;
1665 }
1666 
1667 static void
1668 gt_record_engines(struct intel_gt_coredump *gt,
1669 		  intel_engine_mask_t engine_mask,
1670 		  struct i915_vma_compress *compress,
1671 		  u32 dump_flags)
1672 {
1673 	struct intel_engine_cs *engine;
1674 	enum intel_engine_id id;
1675 
1676 	for_each_engine(engine, gt->_gt, id) {
1677 		struct intel_engine_coredump *ee;
1678 
1679 		/* Refill our page pool before entering atomic section */
1680 		pool_refill(&compress->pool, ALLOW_FAIL);
1681 
1682 		ee = capture_engine(engine, compress, dump_flags);
1683 		if (!ee)
1684 			continue;
1685 
1686 		ee->hung = engine->mask & engine_mask;
1687 
1688 		gt->simulated |= ee->simulated;
1689 		if (ee->simulated) {
1690 			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
1691 				intel_guc_capture_free_node(ee);
1692 			kfree(ee);
1693 			continue;
1694 		}
1695 
1696 		ee->next = gt->engine;
1697 		gt->engine = ee;
1698 	}
1699 }
1700 
1701 static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
1702 			      const struct intel_guc_ct_buffer *ctb,
1703 			      const void *blob_ptr, struct intel_guc *guc)
1704 {
1705 	if (!ctb || !ctb->desc)
1706 		return;
1707 
1708 	saved->raw_status = ctb->desc->status;
1709 	saved->raw_head = ctb->desc->head;
1710 	saved->raw_tail = ctb->desc->tail;
1711 	saved->head = ctb->head;
1712 	saved->tail = ctb->tail;
1713 	saved->size = ctb->size;
1714 	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
1715 	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
1716 }
1717 
1718 static struct intel_uc_coredump *
1719 gt_record_uc(struct intel_gt_coredump *gt,
1720 	     struct i915_vma_compress *compress)
1721 {
1722 	const struct intel_uc *uc = &gt->_gt->uc;
1723 	struct intel_uc_coredump *error_uc;
1724 
1725 	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
1726 	if (!error_uc)
1727 		return NULL;
1728 
1729 	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
1730 	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
1731 
1732 	error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
1733 	error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
1734 	error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
1735 	error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);
1736 
1737 	/*
1738 	 * Save the GuC log and include a timestamp reference for converting the
1739 	 * log times to system times (in conjunction with the error->boottime and
1740 	 * gt->clock_frequency fields saved elsewhere).
1741 	 */
1742 	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
1743 	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
1744 						    "GuC log buffer", compress);
1745 	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
1746 						    "GuC CT buffer", compress);
1747 	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
1748 	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
1749 			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1750 	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
1751 			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1752 
1753 	return error_uc;
1754 }
1755 
1756 /* Capture display registers. */
1757 static void gt_record_display_regs(struct intel_gt_coredump *gt)
1758 {
1759 	struct intel_uncore *uncore = gt->_gt->uncore;
1760 	struct drm_i915_private *i915 = uncore->i915;
1761 
1762 	if (DISPLAY_VER(i915) >= 6 && DISPLAY_VER(i915) < 20)
1763 		gt->derrmr = intel_uncore_read(uncore, DERRMR);
1764 
1765 	if (GRAPHICS_VER(i915) >= 8)
1766 		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1767 	else if (IS_VALLEYVIEW(i915))
1768 		gt->ier = intel_uncore_read(uncore, VLV_IER);
1769 	else if (HAS_PCH_SPLIT(i915))
1770 		gt->ier = intel_uncore_read(uncore, DEIER);
1771 	else if (GRAPHICS_VER(i915) == 2)
1772 		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
1773 	else
1774 		gt->ier = intel_uncore_read(uncore, GEN2_IER);
1775 }
1776 
1777 /* Capture all other registers that GuC doesn't capture. */
1778 static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
1779 {
1780 	struct intel_uncore *uncore = gt->_gt->uncore;
1781 	struct drm_i915_private *i915 = uncore->i915;
1782 	int i;
1783 
1784 	if (IS_VALLEYVIEW(i915)) {
1785 		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1786 		gt->ngtier = 1;
1787 	} else if (GRAPHICS_VER(i915) >= 11) {
1788 		gt->gtier[0] =
1789 			intel_uncore_read(uncore,
1790 					  GEN11_RENDER_COPY_INTR_ENABLE);
1791 		gt->gtier[1] =
1792 			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
1793 		gt->gtier[2] =
1794 			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
1795 		gt->gtier[3] =
1796 			intel_uncore_read(uncore,
1797 					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1798 		gt->gtier[4] =
1799 			intel_uncore_read(uncore,
1800 					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
1801 		gt->gtier[5] =
1802 			intel_uncore_read(uncore,
1803 					  GEN11_GUNIT_CSME_INTR_ENABLE);
1804 		gt->ngtier = 6;
1805 	} else if (GRAPHICS_VER(i915) >= 8) {
1806 		for (i = 0; i < 4; i++)
1807 			gt->gtier[i] =
1808 				intel_uncore_read(uncore, GEN8_GT_IER(i));
1809 		gt->ngtier = 4;
1810 	} else if (HAS_PCH_SPLIT(i915)) {
1811 		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1812 		gt->ngtier = 1;
1813 	}
1814 
1815 	gt->eir = intel_uncore_read(uncore, EIR);
1816 	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
1817 }
1818 
1819 /*
1820  * Capture all registers that relate to workload submission.
1821  * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us
1822  */
1823 static void gt_record_global_regs(struct intel_gt_coredump *gt)
1824 {
1825 	struct intel_uncore *uncore = gt->_gt->uncore;
1826 	struct drm_i915_private *i915 = uncore->i915;
1827 	int i;
1828 
1829 	/*
1830 	 * General organization
1831 	 * 1. Registers specific to a single generation
1832 	 * 2. Registers which belong to multiple generations
1833 	 * 3. Feature specific registers.
1834 	 * 4. Everything else
1835 	 * Please try to follow the order.
1836 	 */
1837 
1838 	/* 1: Registers specific to a single generation */
1839 	if (IS_VALLEYVIEW(i915))
1840 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
1841 
1842 	if (GRAPHICS_VER(i915) == 7)
1843 		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
1844 
1845 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
1846 		gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1847 							XEHP_FAULT_TLB_DATA0);
1848 		gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1849 							XEHP_FAULT_TLB_DATA1);
1850 	} else if (GRAPHICS_VER(i915) >= 12) {
1851 		gt->fault_data0 = intel_uncore_read(uncore,
1852 						    GEN12_FAULT_TLB_DATA0);
1853 		gt->fault_data1 = intel_uncore_read(uncore,
1854 						    GEN12_FAULT_TLB_DATA1);
1855 	} else if (GRAPHICS_VER(i915) >= 8) {
1856 		gt->fault_data0 = intel_uncore_read(uncore,
1857 						    GEN8_FAULT_TLB_DATA0);
1858 		gt->fault_data1 = intel_uncore_read(uncore,
1859 						    GEN8_FAULT_TLB_DATA1);
1860 	}
1861 
1862 	if (GRAPHICS_VER(i915) == 6) {
1863 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
1864 		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
1865 		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
1866 	}
1867 
1868 	/* 2: Registers which belong to multiple generations */
1869 	if (GRAPHICS_VER(i915) >= 7)
1870 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
1871 
1872 	if (GRAPHICS_VER(i915) >= 6) {
1873 		if (GRAPHICS_VER(i915) < 12) {
1874 			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
1875 			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
1876 		}
1877 	}
1878 
1879 	/* 3: Feature specific registers */
1880 	if (IS_GRAPHICS_VER(i915, 6, 7)) {
1881 		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1882 		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
1883 	}
1884 
1885 	if (IS_GRAPHICS_VER(i915, 8, 11))
1886 		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
1887 
1888 	if (GRAPHICS_VER(i915) == 12)
1889 		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
1890 
1891 	if (GRAPHICS_VER(i915) >= 12) {
1892 		for (i = 0; i < I915_MAX_SFC; i++) {
1893 			/*
1894 			 * SFC_DONE resides in the VD forcewake domain, so it
1895 			 * only exists if the corresponding VCS engine is
1896 			 * present.
1897 			 */
1898 			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
1899 			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
1900 				continue;
1901 
1902 			gt->sfc_done[i] =
1903 				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
1904 		}
1905 
1906 		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
1907 	}
1908 }
1909 
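/* Software snapshot of the GT topology info and its clock frequency/period. */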
1910 static void gt_record_info(struct intel_gt_coredump *gt)
1911 {
1912 	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
1913 	gt->clock_frequency = gt->_gt->clock_frequency;
1914 	gt->clock_period_ns = gt->_gt->clock_period_ns;
1915 }
1916 
1917 /*
1918  * Generate a semi-unique error code. The code is not meant to have meaning;
1919  * its only purpose is to help prevent false duplicate bug reports by
1920  * grossly summarising a GPU error state.
1921  *
1922  * TODO: Ideally, hashing the batchbuffer would be a very nice way to
1923  * fingerprint the hang, if we could first strip the GTT offsets from it.
1924  *
1925  * In its current form it is only a small step better than a random number.
1926  */
1927 static u32 generate_ecode(const struct intel_engine_coredump *ee)
1928 {
1929 	/*
1930 	 * IPEHR would be an ideal way to detect errors, as it's the gross
1931 	 * measure of "the command that hung." However, it also catches very
1932 	 * common synchronization commands that almost always appear when the
1933 	 * hang is strictly a client bug. XOR in instdone to disambiguate.
1934 	 */
1935 	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
1936 }
1937 
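/*
 * Compose the one-line hang summary logged to dmesg, e.g.
 * "GPU HANG: ecode 12:1:85dffefe, in Xorg [1234]": graphics version,
 * mask of hung engine classes, ecode, then the first guilty process.
 */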
1938 static const char *error_msg(struct i915_gpu_coredump *error)
1939 {
1940 	struct intel_engine_coredump *first = NULL;
1941 	unsigned int hung_classes = 0;
1942 	struct intel_gt_coredump *gt;
1943 	int len;
1944 
1945 	for (gt = error->gt; gt; gt = gt->next) {
1946 		struct intel_engine_coredump *cs;
1947 
1948 		for (cs = gt->engine; cs; cs = cs->next) {
1949 			if (cs->hung) {
1950 				hung_classes |= BIT(cs->engine->uabi_class);
1951 				if (!first)
1952 					first = cs;
1953 			}
1954 		}
1955 	}
1956 
1957 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
1958 			"GPU HANG: ecode %d:%x:%08x",
1959 			GRAPHICS_VER(error->i915), hung_classes,
1960 			generate_ecode(first));
1961 	if (first && first->context.pid) {
1962 		/* Just show the first executing process; more would be confusing */
1963 		len += scnprintf(error->error_msg + len,
1964 				 sizeof(error->error_msg) - len,
1965 				 ", in %s [%d]",
1966 				 first->context.comm, first->context.pid);
1967 	}
1968 
1969 	return error->error_msg;
1970 }
1971 
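/* Snapshot device-wide software state: PM status, modparams and device info. */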
1972 static void capture_gen(struct i915_gpu_coredump *error)
1973 {
1974 	struct drm_i915_private *i915 = error->i915;
1975 
1976 	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
1977 	error->suspended = pm_runtime_suspended(i915->drm.dev);
1978 
1979 	error->iommu = i915_vtd_active(i915);
1980 	error->reset_count = i915_reset_count(&i915->gpu_error);
1981 	error->suspend_count = i915->suspend_count;
1982 
1983 	i915_params_copy(&error->params, &i915->params);
1984 	intel_display_params_copy(&error->display_params);
1985 	memcpy(&error->device_info,
1986 	       INTEL_INFO(i915),
1987 	       sizeof(error->device_info));
1988 	memcpy(&error->runtime_info,
1989 	       RUNTIME_INFO(i915),
1990 	       sizeof(error->runtime_info));
1991 	memcpy(&error->display_device_info, DISPLAY_INFO(i915),
1992 	       sizeof(error->display_device_info));
1993 	memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
1994 	       sizeof(error->display_runtime_info));
1995 	error->driver_caps = i915->caps;
1996 }
1997 
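/*
 * Allocate the top-level coredump container. Returns NULL rather than
 * an ERR_PTR, both when error capture is disabled via the error_capture
 * modparam and on allocation failure.
 */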
1998 struct i915_gpu_coredump *
1999 i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
2000 {
2001 	struct i915_gpu_coredump *error;
2002 
2003 	if (!i915->params.error_capture)
2004 		return NULL;
2005 
2006 	error = kzalloc(sizeof(*error), gfp);
2007 	if (!error)
2008 		return NULL;
2009 
2010 	kref_init(&error->ref);
2011 	error->i915 = i915;
2012 
2013 	error->time = ktime_get_real();
2014 	error->boottime = ktime_get_boottime();
2015 	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
2016 	error->capture = jiffies;
2017 
2018 	capture_gen(error);
2019 
2020 	return error;
2021 }
2022 
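/* e.g. DAY_AS_SECONDS(180) == 15552000; used to age out the bug-filing nag */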
2023 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
2024 
2025 struct intel_gt_coredump *
2026 intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
2027 {
2028 	struct intel_gt_coredump *gc;
2029 
2030 	gc = kzalloc(sizeof(*gc), gfp);
2031 	if (!gc)
2032 		return NULL;
2033 
2034 	gc->_gt = gt;
2035 	gc->awake = intel_gt_pm_is_awake(gt);
2036 
2037 	gt_record_display_regs(gc);
2038 	gt_record_global_nonguc_regs(gc);
2039 
2040 	/*
2041 	 * Before resetting an engine due to a hung context, the GuC dumps
2042 	 * the global, engine-class and engine-instance registers (those
2043 	 * that can change as part of engine state during execution).
2044 	 * It captures and reports all three groups together as a single
2045 	 * set, taken just before the engine is reset.
2046 	 * Thus, if the GuC triggered the context reset, we retrieve the
2047 	 * register values as part of gt_record_engines() instead.
2048 	 */
2049 	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
2050 		gt_record_global_regs(gc);
2051 
2052 	gt_record_fences(gc);
2053 
2054 	return gc;
2055 }
2056 
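/*
 * Allocate the per-capture compressor used while recording buffer
 * contents: zlib when CONFIG_DRM_I915_COMPRESS_ERROR is enabled,
 * otherwise an uncompressed page stash.
 */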
2057 struct i915_vma_compress *
2058 i915_vma_capture_prepare(struct intel_gt_coredump *gt)
2059 {
2060 	struct i915_vma_compress *compress;
2061 
2062 	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
2063 	if (!compress)
2064 		return NULL;
2065 
2066 	if (!compress_init(compress)) {
2067 		kfree(compress);
2068 		return NULL;
2069 	}
2070 
2071 	return compress;
2072 }
2073 
2074 void i915_vma_capture_finish(struct intel_gt_coredump *gt,
2075 			     struct i915_vma_compress *compress)
2076 {
2077 	if (!compress)
2078 		return;
2079 
2080 	compress_fini(compress);
2081 	kfree(compress);
2082 }
2083 
2084 static struct i915_gpu_coredump *
2085 __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2086 {
2087 	struct drm_i915_private *i915 = gt->i915;
2088 	struct i915_gpu_coredump *error;
2089 
2090 	/* Check if GPU capture has been disabled */
2091 	error = READ_ONCE(i915->gpu_error.first_error);
2092 	if (IS_ERR(error))
2093 		return error;
2094 
2095 	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
2096 	if (!error)
2097 		return ERR_PTR(-ENOMEM);
2098 
2099 	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
2100 	if (error->gt) {
2101 		struct i915_vma_compress *compress;
2102 
2103 		compress = i915_vma_capture_prepare(error->gt);
2104 		if (!compress) {
2105 			kfree(error->gt);
2106 			kfree(error);
2107 			return ERR_PTR(-ENOMEM);
2108 		}
2109 
2110 		if (INTEL_INFO(i915)->has_gt_uc) {
2111 			error->gt->uc = gt_record_uc(error->gt, compress);
2112 			if (error->gt->uc) {
2113 				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
2114 					error->gt->uc->guc.is_guc_capture = true;
2115 				else
2116 					GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
2117 			}
2118 		}
2119 
2120 		gt_record_info(error->gt);
2121 		gt_record_engines(error->gt, engine_mask, compress, dump_flags);
2122 
2123 
2125 
2126 		error->simulated |= error->gt->simulated;
2127 	}
2128 
2129 	error->overlay = intel_overlay_capture_error_state(i915);
2130 
2131 	return error;
2132 }
2133 
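/*
 * All error capture is serialised by a single global mutex, so
 * concurrent hang reports queue up (interruptibly) behind the first
 * capture rather than racing.
 */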
2134 struct i915_gpu_coredump *
2135 i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2136 {
2137 	static DEFINE_MUTEX(capture_mutex);
2138 	int ret = mutex_lock_interruptible(&capture_mutex);
2139 	struct i915_gpu_coredump *dump;
2140 
2141 	if (ret)
2142 		return ERR_PTR(ret);
2143 
2144 	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
2145 	mutex_unlock(&capture_mutex);
2146 
2147 	return dump;
2148 }
2149 
2150 void i915_error_state_store(struct i915_gpu_coredump *error)
2151 {
2152 	struct drm_i915_private *i915;
2153 	static bool warned;
2154 
2155 	if (IS_ERR_OR_NULL(error))
2156 		return;
2157 
2158 	i915 = error->i915;
2159 	drm_info(&i915->drm, "%s\n", error_msg(error));
2160 
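	/* Keep only the very first error; simulated hangs are never stored. */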
2161 	if (error->simulated ||
2162 	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
2163 		return;
2164 
2165 	i915_gpu_coredump_get(error);
2166 
2167 	if (!xchg(&warned, true) &&
2168 	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
2169 		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
2170 		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
2171 		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
2172 		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
2173 		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
2174 		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
2175 			i915->drm.primary->index);
2176 	}
2177 }
2178 
2179 /**
2180  * i915_capture_error_state - capture an error record for later analysis
2181  * @gt: intel_gt which originated the hang
2182  * @engine_mask: hung engines
2183  * @dump_flags: dump flags
2184  *
2185  * Should be called when an error is detected (either a hang or an error
2186  * interrupt) to capture error state from the time of the error. Fills
2187  * out a structure which becomes available in sysfs (card<N>/error) for
2188  * user level tools to pick up.
2189  */
2190 void i915_capture_error_state(struct intel_gt *gt,
2191 			      intel_engine_mask_t engine_mask, u32 dump_flags)
2192 {
2193 	struct i915_gpu_coredump *error;
2194 
2195 	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
2196 	if (IS_ERR(error)) {
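		/* Latch the failure in first_error so capture is not retried. */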
2197 		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
2198 		return;
2199 	}
2200 
2201 	i915_error_state_store(error);
2202 	i915_gpu_coredump_put(error);
2203 }
2204 
2205 struct i915_gpu_coredump *
2206 i915_first_error_state(struct drm_i915_private *i915)
2207 {
2208 	struct i915_gpu_coredump *error;
2209 
2210 	spin_lock_irq(&i915->gpu_error.lock);
2211 	error = i915->gpu_error.first_error;
2212 	if (!IS_ERR_OR_NULL(error))
2213 		i915_gpu_coredump_get(error);
2214 	spin_unlock_irq(&i915->gpu_error.lock);
2215 
2216 	return error;
2217 }
2218 
2219 void i915_reset_error_state(struct drm_i915_private *i915)
2220 {
2221 	struct i915_gpu_coredump *error;
2222 
2223 	spin_lock_irq(&i915->gpu_error.lock);
2224 	error = i915->gpu_error.first_error;
2225 	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
2226 		i915->gpu_error.first_error = NULL;
2227 	spin_unlock_irq(&i915->gpu_error.lock);
2228 
2229 	if (!IS_ERR_OR_NULL(error))
2230 		i915_gpu_coredump_put(error);
2231 }
2232 
2233 void i915_disable_error_state(struct drm_i915_private *i915, int err)
2234 {
2235 	spin_lock_irq(&i915->gpu_error.lock);
2236 	if (!i915->gpu_error.first_error)
2237 		i915->gpu_error.first_error = ERR_PTR(err);
2238 	spin_unlock_irq(&i915->gpu_error.lock);
2239 }
2240 
2241 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
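/*
 * Debug-only helper: stream the whole error capture into the kernel
 * log, chunked to stay under what a single dmesg line can carry.
 */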
2242 void intel_klog_error_capture(struct intel_gt *gt,
2243 			      intel_engine_mask_t engine_mask)
2244 {
2245 	static int g_count;
2246 	struct drm_i915_private *i915 = gt->i915;
2247 	struct i915_gpu_coredump *error;
2248 	intel_wakeref_t wakeref;
2249 	size_t buf_size = PAGE_SIZE * 128;
2250 	size_t pos_err;
2251 	char *buf, *ptr, *next;
2252 	int l_count = g_count++;
2253 	int line = 0;
2254 
2255 	/* Can't allocate memory during a reset */
2256 	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
2257 		drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n",
2258 			l_count, line++);
2259 		return;
2260 	}
2261 
2262 	error = READ_ONCE(i915->gpu_error.first_error);
2263 	if (error) {
2264 		drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n",
2265 			l_count, line++);
2266 		i915_reset_error_state(i915);
2267 	}
2268 
2269 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2270 		error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE);
2271 
2272 	if (IS_ERR(error)) {
2273 		drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error state: %ld!\n",
2274 			l_count, line++, PTR_ERR(error));
2275 		return;
2276 	}
2277 
2278 	buf = kvmalloc(buf_size, GFP_KERNEL);
2279 	if (!buf) {
2280 		drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n",
2281 			l_count, line++);
2282 		i915_gpu_coredump_put(error);
2283 		return;
2284 	}
2285 
2286 	drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n",
2287 		 l_count, line++, __builtin_return_address(0));
2288 
2289 	/* Largest string length safe to print via dmesg */
2290 #	define MAX_CHUNK	800
2291 
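	/* Stream the capture through buf in buf_size - 1 sized chunks. */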
2292 	pos_err = 0;
2293 	while (1) {
2294 		ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1);
2295 
2296 		if (got <= 0)
2297 			break;
2298 
2299 		buf[got] = 0;
2300 		pos_err += got;
2301 
2302 		ptr = buf;
2303 		while (got > 0) {
2304 			size_t count;
2305 			char tag[2];
2306 
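			/*
			 * '>' and '<' bracket a complete line; '}' and '{'
			 * mark a fragment that continues in the next chunk.
			 */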
2307 			next = strnchr(ptr, got, '\n');
2308 			if (next) {
2309 				count = next - ptr;
2310 				*next = 0;
2311 				tag[0] = '>';
2312 				tag[1] = '<';
2313 			} else {
2314 				count = got;
2315 				tag[0] = '}';
2316 				tag[1] = '{';
2317 			}
2318 
2319 			if (count > MAX_CHUNK) {
2320 				size_t pos;
2321 				char *ptr2 = ptr;
2322 
2323 				for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) {
2324 					char chr = ptr[pos];
2325 
2326 					ptr[pos] = 0;
2327 					drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n",
2328 						 l_count, line++, ptr2);
2329 					ptr[pos] = chr;
2330 					ptr2 = ptr + pos;
2331 
2332 					/*
2333 					 * If spewing large amounts of data via a serial console,
2334 					 * this can be a very slow process. So be friendly and try
2335 					 * not to cause 'softlockup on CPU' problems.
2336 					 */
2337 					cond_resched();
2338 				}
2339 
2340 				if (ptr2 < (ptr + count))
2341 					drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
2342 						 l_count, line++, tag[0], ptr2, tag[1]);
2343 				else if (tag[0] == '>')
2344 					drm_info(&i915->drm, "[Capture/%d.%d] ><\n",
2345 						 l_count, line++);
2346 			} else {
2347 				drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
2348 					 l_count, line++, tag[0], ptr, tag[1]);
2349 			}
2350 
2351 			ptr = next;
2352 			got -= count;
2353 			if (next) {
2354 				ptr++;
2355 				got--;
2356 			}
2357 
2358 			/* As above. */
2359 			cond_resched();
2360 		}
2361 
2362 		if (got)
2363 			drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n",
2364 				 l_count, line++, got);
2365 	}
2366 
2367 	kvfree(buf);
2368 
2369 	drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
2370 }
2371 #endif
2372