xref: /linux/drivers/gpu/drm/i915/i915_gpu_error.c (revision 2efb81e5)
/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

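/*
 * Error capture runs while the GPU (and possibly the system) is in a
 * fragile state, so allocations must never trigger the OOM killer or
 * stall for reclaim: ALLOW_FAIL only kicks kswapd and stays silent on
 * failure, while ATOMIC_MAYFAIL serves the paths that cannot sleep at
 * all. Every allocation below is expected to be allowed to fail.
 */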
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

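/*
 * Note that sg->dma_address is repurposed here to record the logical
 * offset of this buffer within the whole error dump, so that
 * i915_gpu_coredump_copy_to_buffer() can later seek within the chained
 * scatterlist without re-walking it from the start on every read.
 */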
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

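/*
 * Grow the text buffer so that at least len + 1 more bytes fit. The
 * current buffer is first sealed into a scatterlist entry; when the
 * current page of entries is exhausted, a fresh page is chained onto
 * the end so the dump can keep growing one page at a time.
 */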
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct folio_batch *fbatch)
{
	folio_batch_release(fbatch);
}

static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
	while (folio_batch_space(fbatch)) {
		struct folio *folio;

		folio = folio_alloc(gfp, 0);
		if (!folio)
			return -ENOMEM;

		folio_batch_add(fbatch, folio);
	}

	return 0;
}

static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
{
	int err;

	folio_batch_init(fbatch);

	err = pool_refill(fbatch, gfp);
	if (err)
		pool_fini(fbatch);

	return err;
}

static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
	struct folio *folio;

	folio = folio_alloc(gfp, 0);
	if (!folio && folio_batch_count(fbatch))
		folio = fbatch->folios[--fbatch->nr];

	return folio ? folio_address(folio) : NULL;
}

static void pool_free(struct folio_batch *fbatch, void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (folio_batch_space(fbatch))
		folio_batch_add(fbatch, folio);
	else
		folio_put(folio);
}

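/*
 * When CONFIG_DRM_I915_COMPRESS_ERROR is set, each captured page is run
 * through zlib before being ascii85-encoded into the dump; otherwise the
 * raw page is copied. The err_compression_marker() character (":" vs "~")
 * records which of the two formats follows, so a decoder can tell them
 * apart.
 */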
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct folio_batch pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

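	/*
	 * Reads from write-combining memory are slow word-by-word, so keep
	 * a staging page that compress_page() can fill with a single
	 * streaming i915_memcpy_from_wc() before handing the data to zlib.
	 */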
	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct folio_batch pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
	err_printf(m, "  context timeline seqno %u\n", ctx->hwsp_seqno);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
				      const struct intel_engine_cs *engine,
				      const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_display_device_info_print(&error->display_device_info,
					&error->display_runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}

static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (gt->uc && gt->uc->guc.is_guc_capture) {
			if (ee->guc_capture_node)
				intel_guc_capture_print_engine_node(m, ee);
			else
				err_printf(m, "  Missing GuC capture node for %s\n",
					   ee->engine->name);
		} else {
			error_print_engine(m, ee);
		}

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * GuC dumps the global, engine-class and engine-instance
		 * registers together as part of the engine state dump, so
		 * they are printed from err_print_gt_engines() instead.
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

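/*
 * Render the coredump into the chained scatterlist once. The result is
 * published with cmpxchg() so that concurrent readers racing to format
 * the same error state agree on a single sg table and the loser frees
 * its duplicate.
 */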
static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

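/*
 * Copy up to rem bytes of the formatted error state into buf, starting
 * at offset off. The scatterlist entry that satisfied the previous read
 * is cached in error->fit, so sequential reads (the common sysfs pattern)
 * resume in O(1) instead of rescanning the chain from the start.
 */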
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

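/*
 * Snapshot the backing pages of a vma. Three paths are possible: read
 * through the reserved GGTT error-capture PTE (remapping one page at a
 * time), read directly from the lmem aperture, or kmap and clflush the
 * pages of a system-memory object.
 */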
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 i915_gem_get_pat_index(gt->i915,
										I915_CACHE_NONE),
							 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     i915_gem_get_pat_index(gt->i915,
									    I915_CACHE_NONE),
						     0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap_local_page(page);
			ret = compress_page(compress, s, dst, false);
			kunmap_local(s);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

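	/* On failure, return the captured pages to the pool and drop the dump. */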
	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		/*
		 * For the media GT, this ring fault register is not replicated,
		 * so don't do multicast/replicated register read/write
		 * operation on it.
		 */
		if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  XELPMP_RING_FAULT_REG);
		else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
			ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
							      XEHP_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

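/*
 * The GEM context may be released at any time, so only dereference it
 * under RCU and take a reference with kref_get_unless_zero() before
 * copying out the process details.
 */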
static bool record_context(struct i915_gem_context_coredump *e,
			   struct intel_context *ce)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(ce->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
	e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
				*ce->timeline->hwsp_seqno : ~0U;

	e->total_runtime = intel_context_get_total_runtime_ns(ce);
	e->avg_runtime = intel_context_get_avg_runtime_ns(ce);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};

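/*
 * Pin down the vma resource so its backing pages cannot be released
 * while the capture list is outstanding; the hold is dropped again in
 * intel_engine_coredump_add_vma() once the pages have been copied.
 */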
static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}

static struct intel_engine_capture_vma *
engine_coredump_add_context(struct intel_engine_coredump *ee,
			    struct intel_context *ce,
			    gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, ce);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
	vma = capture_vma(vma, ce->state, "HW context", gfp);

	return vma;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma;

	vma = engine_coredump_add_context(ee, rq->context, gfp);
	if (!vma)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}

static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress,
	       u32 dump_flags)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct intel_context *ce = NULL;
	struct i915_request *rq = NULL;

	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
	if (!ee)
		return NULL;

	intel_engine_get_hung_entity(engine, &ce, &rq);
	if (rq && !i915_request_started(rq))
		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
			 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);

	if (rq) {
		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
		i915_request_put(rq);
	} else if (ce) {
		capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
	}

	if (capture) {
		intel_engine_coredump_add_vma(ee, capture, compress);

		if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
			intel_guc_capture_get_matching_node(engine->gt, ee, ce);
	} else {
		kfree(ee);
		ee = NULL;
	}

	return ee;
}

static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress,
		  u32 dump_flags)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering the atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress, dump_flags);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
				intel_guc_capture_free_node(ee);
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

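/*
 * Record the state of a GuC command transport buffer: both the driver's
 * cached head/tail and the raw values from the shared descriptor, plus
 * the offsets of the descriptor and command stream within the CTB blob
 * so the dump can be cross-referenced against the captured buffer.
 */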
1708 static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
1709 			      const struct intel_guc_ct_buffer *ctb,
1710 			      const void *blob_ptr, struct intel_guc *guc)
1711 {
1712 	if (!ctb || !ctb->desc)
1713 		return;
1714 
1715 	saved->raw_status = ctb->desc->status;
1716 	saved->raw_head = ctb->desc->head;
1717 	saved->raw_tail = ctb->desc->tail;
1718 	saved->head = ctb->head;
1719 	saved->tail = ctb->tail;
1720 	saved->size = ctb->size;
1721 	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
1722 	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
1723 }
1724 
1725 static struct intel_uc_coredump *
1726 gt_record_uc(struct intel_gt_coredump *gt,
1727 	     struct i915_vma_compress *compress)
1728 {
1729 	const struct intel_uc *uc = &gt->_gt->uc;
1730 	struct intel_uc_coredump *error_uc;
1731 
1732 	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
1733 	if (!error_uc)
1734 		return NULL;
1735 
1736 	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
1737 	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
1738 
1739 	error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
1740 	error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
1741 	error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
1742 	error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);
1743 
1744 	/*
1745 	 * Save the GuC log and include a timestamp reference for converting the
1746 	 * log times to system times (in conjunction with the error->boottime and
1747 	 * gt->clock_frequency fields saved elsewhere).
1748 	 */
1749 	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
1750 	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
1751 						    "GuC log buffer", compress);
1752 	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
1753 						    "GuC CT buffer", compress);
1754 	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
1755 	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
1756 			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1757 	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
1758 			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1759 
1760 	return error_uc;
1761 }
1762 
1763 /* Capture display registers. */
1764 static void gt_record_display_regs(struct intel_gt_coredump *gt)
1765 {
1766 	struct intel_uncore *uncore = gt->_gt->uncore;
1767 	struct drm_i915_private *i915 = uncore->i915;
1768 
1769 	if (DISPLAY_VER(i915) >= 6 && DISPLAY_VER(i915) < 20)
1770 		gt->derrmr = intel_uncore_read(uncore, DERRMR);
1771 
1772 	if (GRAPHICS_VER(i915) >= 8)
1773 		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1774 	else if (IS_VALLEYVIEW(i915))
1775 		gt->ier = intel_uncore_read(uncore, VLV_IER);
1776 	else if (HAS_PCH_SPLIT(i915))
1777 		gt->ier = intel_uncore_read(uncore, DEIER);
1778 	else if (GRAPHICS_VER(i915) == 2)
1779 		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
1780 	else
1781 		gt->ier = intel_uncore_read(uncore, GEN2_IER);
1782 }
1783 
1784 /* Capture all other registers that GuC doesn't capture. */
1785 static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
1786 {
1787 	struct intel_uncore *uncore = gt->_gt->uncore;
1788 	struct drm_i915_private *i915 = uncore->i915;
1789 	int i;
1790 
1791 	if (IS_VALLEYVIEW(i915)) {
1792 		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1793 		gt->ngtier = 1;
1794 	} else if (GRAPHICS_VER(i915) >= 11) {
1795 		gt->gtier[0] =
1796 			intel_uncore_read(uncore,
1797 					  GEN11_RENDER_COPY_INTR_ENABLE);
1798 		gt->gtier[1] =
1799 			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
1800 		gt->gtier[2] =
1801 			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
1802 		gt->gtier[3] =
1803 			intel_uncore_read(uncore,
1804 					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1805 		gt->gtier[4] =
1806 			intel_uncore_read(uncore,
1807 					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
1808 		gt->gtier[5] =
1809 			intel_uncore_read(uncore,
1810 					  GEN11_GUNIT_CSME_INTR_ENABLE);
1811 		gt->ngtier = 6;
1812 	} else if (GRAPHICS_VER(i915) >= 8) {
1813 		for (i = 0; i < 4; i++)
1814 			gt->gtier[i] =
1815 				intel_uncore_read(uncore, GEN8_GT_IER(i));
1816 		gt->ngtier = 4;
1817 	} else if (HAS_PCH_SPLIT(i915)) {
1818 		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1819 		gt->ngtier = 1;
1820 	}
1821 
1822 	gt->eir = intel_uncore_read(uncore, EIR);
1823 	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
1824 }
1825 
1826 /*
1827  * Capture all registers that relate to workload submission.
1828  * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us
1829  */
1830 static void gt_record_global_regs(struct intel_gt_coredump *gt)
1831 {
1832 	struct intel_uncore *uncore = gt->_gt->uncore;
1833 	struct drm_i915_private *i915 = uncore->i915;
1834 	int i;
1835 
1836 	/*
1837 	 * General organization
1838 	 * 1. Registers specific to a single generation
1839 	 * 2. Registers which belong to multiple generations
1840 	 * 3. Feature specific registers.
1841 	 * 4. Everything else
1842 	 * Please try to follow the order.
1843 	 */
1844 
1845 	/* 1: Registers specific to a single generation */
1846 	if (IS_VALLEYVIEW(i915))
1847 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
1848 
1849 	if (GRAPHICS_VER(i915) == 7)
1850 		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
1851 
1852 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
1853 		gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1854 							XEHP_FAULT_TLB_DATA0);
1855 		gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1856 							XEHP_FAULT_TLB_DATA1);
1857 	} else if (GRAPHICS_VER(i915) >= 12) {
1858 		gt->fault_data0 = intel_uncore_read(uncore,
1859 						    GEN12_FAULT_TLB_DATA0);
1860 		gt->fault_data1 = intel_uncore_read(uncore,
1861 						    GEN12_FAULT_TLB_DATA1);
1862 	} else if (GRAPHICS_VER(i915) >= 8) {
1863 		gt->fault_data0 = intel_uncore_read(uncore,
1864 						    GEN8_FAULT_TLB_DATA0);
1865 		gt->fault_data1 = intel_uncore_read(uncore,
1866 						    GEN8_FAULT_TLB_DATA1);
1867 	}
1868 
1869 	if (GRAPHICS_VER(i915) == 6) {
1870 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
1871 		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
1872 		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
1873 	}
1874 
1875 	/* 2: Registers which belong to multiple generations */
1876 	if (GRAPHICS_VER(i915) >= 7)
1877 		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
1878 
1879 	if (GRAPHICS_VER(i915) >= 6) {
1880 		if (GRAPHICS_VER(i915) < 12) {
1881 			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
1882 			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
1883 		}
1884 	}
1885 
1886 	/* 3: Feature specific registers */
1887 	if (IS_GRAPHICS_VER(i915, 6, 7)) {
1888 		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1889 		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
1890 	}
1891 
1892 	if (IS_GRAPHICS_VER(i915, 8, 11))
1893 		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
1894 
1895 	if (GRAPHICS_VER(i915) == 12)
1896 		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
1897 
1898 	if (GRAPHICS_VER(i915) >= 12) {
1899 		for (i = 0; i < I915_MAX_SFC; i++) {
1900 			/*
1901 			 * SFC_DONE resides in the VD forcewake domain, so it
1902 			 * only exists if the corresponding VCS engine is
1903 			 * present.
1904 			 */
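			/*
			 * Each SFC is shared between a pair of video
			 * engines, which is why only the even VCS
			 * instance is checked.
			 */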
1905 			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
1906 			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
1907 				continue;
1908 
1909 			gt->sfc_done[i] =
1910 				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
1911 		}
1912 
1913 		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
1914 	}
1915 }
1916 
1917 static void gt_record_info(struct intel_gt_coredump *gt)
1918 {
1919 	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
1920 	gt->clock_frequency = gt->_gt->clock_frequency;
1921 	gt->clock_period_ns = gt->_gt->clock_period_ns;
1922 }
1923 
1924 /*
1925  * Generate a semi-unique error code. The code is not meant to have meaning;
1926  * its only purpose is to try to prevent false duplicate bug reports by
1927  * grossly estimating a GPU error state.
1928  *
1929  * TODO: Ideally, hashing the batchbuffer would be a very nice way to identify
1930  * the hang if we could strip the GTT offset information from it.
1931  *
1932  * It's only a small step better than a random number in its current form.
1933  */
1934 static u32 generate_ecode(const struct intel_engine_coredump *ee)
1935 {
1936 	/*
1937 	 * IPEHR would be an ideal way to detect errors, as it's the gross
1938 	 * measure of "the command that hung." However, it contains some very
1939 	 * common synchronization commands which almost always appear when the
1940 	 * hang is strictly a client bug. Use instdone to differentiate somewhat.
1941 	 */
1942 	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
1943 }
1944 
1945 static const char *error_msg(struct i915_gpu_coredump *error)
1946 {
1947 	struct intel_engine_coredump *first = NULL;
1948 	unsigned int hung_classes = 0;
1949 	struct intel_gt_coredump *gt;
1950 	int len;
1951 
1952 	for (gt = error->gt; gt; gt = gt->next) {
1953 		struct intel_engine_coredump *cs;
1954 
1955 		for (cs = gt->engine; cs; cs = cs->next) {
1956 			if (cs->hung) {
1957 				hung_classes |= BIT(cs->engine->uabi_class);
1958 				if (!first)
1959 					first = cs;
1960 			}
1961 		}
1962 	}
1963 
1964 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
1965 			"GPU HANG: ecode %d:%x:%08x",
1966 			GRAPHICS_VER(error->i915), hung_classes,
1967 			generate_ecode(first));
1968 	if (first && first->context.pid) {
1969 		/* Just show the first executing process, more is confusing */
1970 		len += scnprintf(error->error_msg + len,
1971 				 sizeof(error->error_msg) - len,
1972 				 ", in %s [%d]",
1973 				 first->context.comm, first->context.pid);
1974 	}
1975 
1976 	return error->error_msg;
1977 }
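
/*
 * For illustration, with hypothetical values the resulting message would
 * read:
 *
 *   GPU HANG: ecode 12:1:85dffffb, in Xorg [419]
 *
 * i.e. graphics version, the bitmask of hung engine classes, the ecode
 * from above, then the first process found executing on a hung engine.
 */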
1978 
1979 static void capture_gen(struct i915_gpu_coredump *error)
1980 {
1981 	struct drm_i915_private *i915 = error->i915;
1982 
1983 	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
1984 	error->suspended = pm_runtime_suspended(i915->drm.dev);
1985 
1986 	error->iommu = i915_vtd_active(i915);
1987 	error->reset_count = i915_reset_count(&i915->gpu_error);
1988 	error->suspend_count = i915->suspend_count;
1989 
1990 	i915_params_copy(&error->params, &i915->params);
1991 	memcpy(&error->device_info,
1992 	       INTEL_INFO(i915),
1993 	       sizeof(error->device_info));
1994 	memcpy(&error->runtime_info,
1995 	       RUNTIME_INFO(i915),
1996 	       sizeof(error->runtime_info));
1997 	memcpy(&error->display_device_info, DISPLAY_INFO(i915),
1998 	       sizeof(error->display_device_info));
1999 	memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
2000 	       sizeof(error->display_runtime_info));
2001 	error->driver_caps = i915->caps;
2002 }
2003 
2004 struct i915_gpu_coredump *
2005 i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
2006 {
2007 	struct i915_gpu_coredump *error;
2008 
2009 	if (!i915->params.error_capture)
2010 		return NULL;
2011 
2012 	error = kzalloc(sizeof(*error), gfp);
2013 	if (!error)
2014 		return NULL;
2015 
2016 	kref_init(&error->ref);
2017 	error->i915 = i915;
2018 
2019 	error->time = ktime_get_real();
2020 	error->boottime = ktime_get_boottime();
2021 	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
2022 	error->capture = jiffies;
2023 
2024 	capture_gen(error);
2025 
2026 	return error;
2027 }
2028 
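/*
 * Used below to stop printing the bug-filing hints once the driver
 * build is more than ~six months old.
 */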
2029 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
2030 
2031 struct intel_gt_coredump *
2032 intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
2033 {
2034 	struct intel_gt_coredump *gc;
2035 
2036 	gc = kzalloc(sizeof(*gc), gfp);
2037 	if (!gc)
2038 		return NULL;
2039 
2040 	gc->_gt = gt;
2041 	gc->awake = intel_gt_pm_is_awake(gt);
2042 
2043 	gt_record_display_regs(gc);
2044 	gt_record_global_nonguc_regs(gc);
2045 
2046 	/*
2047 	 * GuC dumps the global, engine-class and engine-instance
2048 	 * registers (those that can change as part of engine state
2049 	 * during execution) before it resets an engine for a hung
2050 	 * context, and it captures and reports all three groups
2051 	 * of registers together as a single set.
2052 	 * Thus, if GuC triggered the context reset, we retrieve
2053 	 * the register values as part of gt_record_engines().
2054 	 */
2055 	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
2056 		gt_record_global_regs(gc);
2057 
2058 	gt_record_fences(gc);
2059 
2060 	return gc;
2061 }
2062 
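/*
 * Allocate the state used to (optionally, zlib-)compress captured vma
 * contents; paired with i915_vma_capture_finish() below.
 */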
2063 struct i915_vma_compress *
2064 i915_vma_capture_prepare(struct intel_gt_coredump *gt)
2065 {
2066 	struct i915_vma_compress *compress;
2067 
2068 	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
2069 	if (!compress)
2070 		return NULL;
2071 
2072 	if (!compress_init(compress)) {
2073 		kfree(compress);
2074 		return NULL;
2075 	}
2076 
2077 	return compress;
2078 }
2079 
2080 void i915_vma_capture_finish(struct intel_gt_coredump *gt,
2081 			     struct i915_vma_compress *compress)
2082 {
2083 	if (!compress)
2084 		return;
2085 
2086 	compress_fini(compress);
2087 	kfree(compress);
2088 }
2089 
2090 static struct i915_gpu_coredump *
2091 __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2092 {
2093 	struct drm_i915_private *i915 = gt->i915;
2094 	struct i915_gpu_coredump *error;
2095 
2096 	/* Check if GPU capture has been disabled (first_error then holds an ERR_PTR) */
2097 	error = READ_ONCE(i915->gpu_error.first_error);
2098 	if (IS_ERR(error))
2099 		return error;
2100 
2101 	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
2102 	if (!error)
2103 		return ERR_PTR(-ENOMEM);
2104 
2105 	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
2106 	if (error->gt) {
2107 		struct i915_vma_compress *compress;
2108 
2109 		compress = i915_vma_capture_prepare(error->gt);
2110 		if (!compress) {
2111 			kfree(error->gt);
2112 			kfree(error);
2113 			return ERR_PTR(-ENOMEM);
2114 		}
2115 
2116 		if (INTEL_INFO(i915)->has_gt_uc) {
2117 			error->gt->uc = gt_record_uc(error->gt, compress);
2118 			if (error->gt->uc) {
2119 				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
2120 					error->gt->uc->guc.is_guc_capture = true;
2121 				else
2122 					GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
2123 			}
2124 		}
2125 
2126 		gt_record_info(error->gt);
2127 		gt_record_engines(error->gt, engine_mask, compress, dump_flags);
2128 
2130 		i915_vma_capture_finish(error->gt, compress);
2131 
2132 		error->simulated |= error->gt->simulated;
2133 	}
2134 
2135 	error->overlay = intel_overlay_capture_error_state(i915);
2136 
2137 	return error;
2138 }
2139 
2140 struct i915_gpu_coredump *
2141 i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2142 {
2143 	static DEFINE_MUTEX(capture_mutex);
2144 	int ret = mutex_lock_interruptible(&capture_mutex);
2145 	struct i915_gpu_coredump *dump;
2146 
2147 	if (ret)
2148 		return ERR_PTR(ret);
2149 
2150 	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
2151 	mutex_unlock(&capture_mutex);
2152 
2153 	return dump;
2154 }
2155 
2156 void i915_error_state_store(struct i915_gpu_coredump *error)
2157 {
2158 	struct drm_i915_private *i915;
2159 	static bool warned;
2160 
2161 	if (IS_ERR_OR_NULL(error))
2162 		return;
2163 
2164 	i915 = error->i915;
2165 	drm_info(&i915->drm, "%s\n", error_msg(error));
2166 
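	/*
	 * Only the first real error is kept, until it is consumed and
	 * cleared via i915_reset_error_state(); simulated hangs are
	 * never stored.
	 */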
2167 	if (error->simulated ||
2168 	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
2169 		return;
2170 
2171 	i915_gpu_coredump_get(error);
2172 
2173 	if (!xchg(&warned, true) &&
2174 	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
2175 		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
2176 		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
2177 		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
2178 		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
2179 		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
2180 		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
2181 			i915->drm.primary->index);
2182 	}
2183 }
2184 
2185 /**
2186  * i915_capture_error_state - capture an error record for later analysis
2187  * @gt: intel_gt which originated the hang
2188  * @engine_mask: hung engines
2189  * @dump_flags: dump flags
2190  *
2191  * Should be called when an error is detected (either a hang or an error
2192  * interrupt) to capture error state from the time of the error. Fills
2193  * out a structure which becomes available in sysfs for user level tools
2194  * to pick up.
2195  */
2196 void i915_capture_error_state(struct intel_gt *gt,
2197 			      intel_engine_mask_t engine_mask, u32 dump_flags)
2198 {
2199 	struct i915_gpu_coredump *error;
2200 
2201 	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
2202 	if (IS_ERR(error)) {
2203 		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
2204 		return;
2205 	}
2206 
2207 	i915_error_state_store(error);
2208 	i915_gpu_coredump_put(error);
2209 }
2210 
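/*
 * Return the pending error state, if any, with an extra reference that
 * the caller must drop with i915_gpu_coredump_put(). Note this may also
 * return the bare ERR_PTR sentinel stored while capture is disabled.
 */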
2211 struct i915_gpu_coredump *
2212 i915_first_error_state(struct drm_i915_private *i915)
2213 {
2214 	struct i915_gpu_coredump *error;
2215 
2216 	spin_lock_irq(&i915->gpu_error.lock);
2217 	error = i915->gpu_error.first_error;
2218 	if (!IS_ERR_OR_NULL(error))
2219 		i915_gpu_coredump_get(error);
2220 	spin_unlock_irq(&i915->gpu_error.lock);
2221 
2222 	return error;
2223 }
2224 
2225 void i915_reset_error_state(struct drm_i915_private *i915)
2226 {
2227 	struct i915_gpu_coredump *error;
2228 
2229 	spin_lock_irq(&i915->gpu_error.lock);
2230 	error = i915->gpu_error.first_error;
2231 	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
2232 		i915->gpu_error.first_error = NULL;
2233 	spin_unlock_irq(&i915->gpu_error.lock);
2234 
2235 	if (!IS_ERR_OR_NULL(error))
2236 		i915_gpu_coredump_put(error);
2237 }
2238 
2239 void i915_disable_error_state(struct drm_i915_private *i915, int err)
2240 {
2241 	spin_lock_irq(&i915->gpu_error.lock);
2242 	if (!i915->gpu_error.first_error)
2243 		i915->gpu_error.first_error = ERR_PTR(err);
2244 	spin_unlock_irq(&i915->gpu_error.lock);
2245 }
2246 
2247 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
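/*
 * Dump a fresh error capture straight into the kernel log in bounded
 * chunks, for debug builds where fetching the sysfs node may not be
 * practical.
 */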
2248 void intel_klog_error_capture(struct intel_gt *gt,
2249 			      intel_engine_mask_t engine_mask)
2250 {
2251 	static int g_count;
2252 	struct drm_i915_private *i915 = gt->i915;
2253 	struct i915_gpu_coredump *error;
2254 	intel_wakeref_t wakeref;
2255 	size_t buf_size = PAGE_SIZE * 128;
2256 	size_t pos_err;
2257 	char *buf, *ptr, *next;
2258 	int l_count = g_count++;
2259 	int line = 0;
2260 
2261 	/* Can't allocate memory during a reset */
2262 	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
2263 		drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n",
2264 			l_count, line++);
2265 		return;
2266 	}
2267 
2268 	error = READ_ONCE(i915->gpu_error.first_error);
2269 	if (error) {
2270 		drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n",
2271 			l_count, line++);
2272 		i915_reset_error_state(i915);
2273 	}
2274 
2275 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2276 		error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE);
2277 
2278 	if (IS_ERR(error)) {
2279 		drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error state: %ld!\n",
2280 			l_count, line++, PTR_ERR(error));
2281 		return;
2282 	}
2283 
2284 	buf = kvmalloc(buf_size, GFP_KERNEL);
2285 	if (!buf) {
2286 		drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n",
2287 			l_count, line++);
2288 		i915_gpu_coredump_put(error);
2289 		return;
2290 	}
2291 
2292 	drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n",
2293 		 l_count, line++, __builtin_return_address(0));
2294 
2295 	/* Largest string length safe to print via dmesg */
2296 #	define MAX_CHUNK	800
2297 
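	/*
	 * Each emitted record is bracketed by '>'/'<' when it carries a
	 * complete line of the dump, and by '}'/'{' when it carries a
	 * fragment of a line split across records.
	 */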
2298 	pos_err = 0;
2299 	while (1) {
2300 		ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1);
2301 
2302 		if (got <= 0)
2303 			break;
2304 
2305 		buf[got] = 0;
2306 		pos_err += got;
2307 
2308 		ptr = buf;
2309 		while (got > 0) {
2310 			size_t count;
2311 			char tag[2];
2312 
2313 			next = strnchr(ptr, got, '\n');
2314 			if (next) {
2315 				count = next - ptr;
2316 				*next = 0;
2317 				tag[0] = '>';
2318 				tag[1] = '<';
2319 			} else {
2320 				count = got;
2321 				tag[0] = '}';
2322 				tag[1] = '{';
2323 			}
2324 
2325 			if (count > MAX_CHUNK) {
2326 				size_t pos;
2327 				char *ptr2 = ptr;
2328 
2329 				for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) {
2330 					char chr = ptr[pos];
2331 
2332 					ptr[pos] = 0;
2333 					drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n",
2334 						 l_count, line++, ptr2);
2335 					ptr[pos] = chr;
2336 					ptr2 = ptr + pos;
2337 
2338 					/*
2339 					 * If spewing large amounts of data via a serial console,
2340 					 * this can be a very slow process. So be friendly and try
2341 					 * not to cause 'softlockup on CPU' problems.
2342 					 */
2343 					cond_resched();
2344 				}
2345 
2346 				if (ptr2 < (ptr + count))
2347 					drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
2348 						 l_count, line++, tag[0], ptr2, tag[1]);
2349 				else if (tag[0] == '>')
2350 					drm_info(&i915->drm, "[Capture/%d.%d] ><\n",
2351 						 l_count, line++);
2352 			} else {
2353 				drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
2354 					 l_count, line++, tag[0], ptr, tag[1]);
2355 			}
2356 
2357 			ptr = next;
2358 			got -= count;
2359 			if (next) {
2360 				ptr++;
2361 				got--;
2362 			}
2363 
2364 			/* As above. */
2365 			cond_resched();
2366 		}
2367 
2368 		if (got)
2369 			drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n",
2370 				 l_count, line++, got);
2371 	}
2372 
2373 	kvfree(buf);
2374 
2375 	drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
2376 }
2377 #endif
2378