/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_print.h>

#include "display/intel_atomic.h"
#include "display/intel_csr.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"

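/*
 * Error capture runs in the aftermath of a GPU hang, so it must never make
 * matters worse: every allocation below is allowed to fail quietly
 * (__GFP_NOWARN), and ATOMIC_MAYFAIL is used where we cannot sleep, e.g.
 * while holding the engine's irq-off lock in capture_engine().
 */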
#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

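/*
 * The error state is streamed into a chain of scatterlist tables whose
 * entries point at kmalloc'd text chunks. Note that sg->dma_address is
 * repurposed here to record the logical offset of each chunk within the
 * whole dump, which lets readers seek without walking from the start.
 */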
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

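/*
 * Two-pass printf: first measure the formatted length against a NULL
 * destination, grow the buffer to fit, then emit for real. The va_list is
 * copied so that the second pass sees the original arguments.
 */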
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single-threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

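/*
 * Try a fresh allocation first; only if that fails do we dip into the
 * pre-filled stash, so the reserve is kept for genuinely atomic
 * emergencies (the stash is topped up again before each engine capture,
 * see gt_record_engines()).
 */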
static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

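/*
 * The zlib workspace is allocated once up front so that starting and
 * finishing a deflate stream per object never allocates. c->tmp, when
 * available, is a staging page for copying out of write-combined memory.
 */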
static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page;

	if (dst->page_count >= dst->num_pages)
		return ERR_PTR(-ENOSPC);

	page = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page)
		return ERR_PTR(-ENOMEM);

	return dst->pages[dst->page_count++] = page;
}

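/*
 * Reads from write-combined (uncached) memory are painfully slow, so where
 * possible the source page is first pulled into the cached staging page
 * with i915_memcpy_from_wc() (non-temporal loads) before zlib touches it.
 */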
static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
	int slice;
	int subslice;

	err_printf(m, " INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, " SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
		err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
		err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (INTEL_GEN(m->i915) < 12)
		return;

	err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	const u32 period = m->i915->gt.clock_period_ns;

	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime * period,
		   mul_u32_u32(ctx->avg_runtime, period));
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

static struct i915_vma_coredump *
find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, " CCID: 0x%08x\n", ee->ccid);
	err_printf(m, " START: 0x%08x\n", ee->start);
	err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, " CTL: 0x%08x\n", ee->ctl);
	err_printf(m, " MODE: 0x%08x\n", ee->mode);
	err_printf(m, " HWS: 0x%08x\n", ee->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, " ESR: 0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, " BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " hung: %u\n", ee->hung);
	err_printf(m, " engine reset count: %u\n", ee->reset_count);

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, " ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}

	error_print_context(m, " Active context: ", &ee->context);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_vma(struct drm_i915_error_state_buf *m,
			    const struct intel_engine_cs *engine,
			    const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	int page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	for (page = 0; page < vma->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == vma->page_count - 1)
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(vma->pages[page][i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print_static(&error->device_info, &p);
	intel_device_info_print_runtime(&error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	print_error_vma(m, NULL, error_uc->guc_log);
}

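/*
 * Walk the chained scatterlist tables built by __i915_error_grow(): free
 * each chunk of text, then the page holding the table itself, following
 * the chain pointer in the final entry to the next table (if any).
 */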
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(&gt->info.sseu, &p);
}

static void err_print_gt(struct drm_i915_error_state_buf *m,
			 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;
	int i;

	err_printf(m, "GT awake: %s\n", yesno(gt->awake));
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);

	if (IS_GEN_RANGE(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (INTEL_GEN(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (IS_GEN(m->i915, 7))
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GEN_RANGE(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (IS_GEN(m->i915, 12))
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (INTEL_GEN(m->i915) >= 12) {
		int i;

		for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
			err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);

		err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
	}

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		error_print_engine(m, ee);
		for (vma = ee->vma; vma; vma = vma->next)
			print_error_vma(m, ee->engine, vma);
	}

	if (gt->uc)
		err_print_uc(m, gt->uc);

	err_print_gt_info(m, gt);
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(m->i915)) {
		struct intel_csr *csr = &m->i915->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));

	if (error->gt)
		err_print_gt(m, error->gt);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

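/*
 * Copy a window [off, off + rem) of the rendered error state into buf.
 * The dump is rendered to the scatterlist on first use, and error->fit
 * caches where the previous read stopped so that sequential reads (the
 * common case for the sysfs/debugfs error node) resume without
 * rescanning the chain from the start.
 */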
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		int page;

		for (page = 0; page < vma->page_count; page++)
			free_page((unsigned long)vma->pages[page]);

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.path);
	kfree(uc->huc_fw.path);
	i915_vma_coredump_free(uc->guc_log);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);
	kfree(error->display);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

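/*
 * Snapshot the contents of a vma. Three paths, in order of preference:
 * bind each backing page into the reserved GGTT error-capture slot and
 * read it back through a WC mapping; map lmem pages directly through the
 * memory region's iomap; or, for system pages, clflush and kmap. The
 * worst-case zlib expansion (~10/8) is preallocated so that compression
 * never needs to grow the page array mid-stream.
 */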
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma *vma,
			 const char *name,
			 struct i915_vma_compress *compress)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	unsigned long num_pages;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma || !vma->pages || !compress)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->gtt_page_sizes = vma->page_sizes.gtt;
	dst->num_pages = num_pages;
	dst->page_count = 0;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma->pages) {
			mutex_lock(&ggtt->error_mutex);
			ggtt->vm.insert_page(&ggtt->vm, dma, slot,
					     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (i915_gem_object_is_lmem(vma->obj)) {
		struct intel_memory_region *mem = vma->obj->mm.region;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma->pages) {
			void __iomem *s;

			s = io_mapping_map_wc(&mem->iomap,
					      dma - mem->region.start,
					      PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma->pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		while (dst->page_count--)
			pool_free(&compress->pool, dst->pages[dst->page_count]);
		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (INTEL_GEN(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (INTEL_GEN(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (INTEL_GEN(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (INTEL_GEN(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (INTEL_GEN(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (INTEL_GEN(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (INTEL_GEN(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (IS_GEN(i915, 7)) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN(engine->i915, 6)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (IS_GEN(i915, 6)) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (IS_GEN(i915, 7)) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (INTEL_GEN(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

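/*
 * The GEM context may be disappearing underneath us (the request outlives
 * its context's userspace handle), so look it up under RCU and only take
 * a reference if the refcount has not already hit zero. Returns true if
 * this capture should be discarded, i.e. the context opted out of error
 * capture.
 */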
static bool record_context(struct i915_gem_context_coredump *e,
			   const struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);

	e->total_runtime = rq->context->runtime.total;
	e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma *vma;
	char name[16];
};

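/*
 * Build a list of vma to copy out later. Each vma is pinned by taking an
 * active reference (i915_active_acquire_if_busy()), which keeps its
 * backing pages alive until intel_engine_coredump_add_vma() has copied
 * them and dropped the reference; an idle vma is simply skipped.
 */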
static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	struct intel_engine_capture_vma *c;

	if (!vma)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_active_acquire_if_busy(&vma->active)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma = vma; /* reference held while active */

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma(capture, c->vma, "user", gfp);

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	engine_record_registers(ee);
	engine_record_execlists(ee);

	return ee;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, rq);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, rq->batch, "batch", gfp);
	vma = capture_user(vma, rq, gfp);
	vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
	vma = capture_vma(vma, rq->context->state, "HW context", gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma *vma = this->vma;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt,
						 vma, this->name,
						 compress));

		i915_active_release(&vma->active);

		capture = this->next;
		kfree(this);
	}

	add_vma(ee,
		i915_vma_coredump_create(engine->gt,
					 engine->status_page.vma,
					 "HW Status",
					 compress));

	add_vma(ee,
		i915_vma_coredump_create(engine->gt,
					 engine->wa_ctx.vma,
					 "WA context",
					 compress));
}

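/*
 * Snapshot a single engine. The register state and request list must be
 * sampled under the engine's irq-off active lock, hence ATOMIC_MAYFAIL
 * for the allocations made there; the bulk of the work, copying out the
 * vma contents, happens afterwards once the lock has been dropped.
 */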
static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct i915_request *rq;
	unsigned long flags;

	ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
	if (!ee)
		return NULL;

	spin_lock_irqsave(&engine->active.lock, flags);
	rq = intel_engine_find_active_request(engine);
	if (rq)
		capture = intel_engine_coredump_add_request(ee, rq,
							    ATOMIC_MAYFAIL);
	spin_unlock_irqrestore(&engine->active.lock, flags);
	if (!capture) {
		kfree(ee);
		return NULL;
	}

	intel_engine_coredump_add_vma(ee, capture, compress);

	return ee;
}

static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	/* Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
	error_uc->guc_log =
		i915_vma_coredump_create(gt->_gt,
					 uc->guc.log.vma, "GuC log buffer",
					 compress);

	return error_uc;
}

/* Capture all registers which don't fit into another category. */
static void gt_record_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ier = intel_uncore_read(uncore, VLV_IER);
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
	}

	if (IS_GEN(i915, 7))
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (INTEL_GEN(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (INTEL_GEN(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN(i915, 6)) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (INTEL_GEN(i915) >= 6) {
		gt->derrmr = intel_uncore_read(uncore, DERRMR);
		if (INTEL_GEN(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GEN_RANGE(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GEN_RANGE(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (IS_GEN(i915, 12))
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (INTEL_GEN(i915) >= 12) {
		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}

	/* 4: Everything else */
	if (INTEL_GEN(i915) >= 11) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (INTEL_GEN(i915) >= 8) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->ier = intel_uncore_read(uncore, DEIER);
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (IS_GEN(i915, 2)) {
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	} else if (!IS_VALLEYVIEW(i915)) {
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
	}
	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}

static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
}

/*
 * Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very
	 * common synchronization commands which almost always appear in
	 * cases that are strictly a client bug. Use instdone to
	 * differentiate those hangs somewhat.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}

static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			INTEL_GEN(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}

static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}

struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_regs(gc);
	gt_record_fences(gc);

	return gc;
}

struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}

void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}

struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress);

		if (INTEL_INFO(i915)->has_gt_uc)
			error->gt->uc = gt_record_uc(error->gt, compress);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);
	error->display = intel_display_capture_error_state(i915);

	return error;
}

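/*
 * Publish the coredump as i915->gpu_error.first_error. Only the first
 * hang is kept (the cmpxchg fails if a dump is already pending) until
 * userspace clears it via the error node, so a flood of hangs cannot pin
 * unbounded amounts of memory.
 */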
void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}

struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}

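/*
 * Park an ERR_PTR in first_error to veto further capture attempts:
 * i915_gpu_coredump() checks for it before allocating anything. -ENODEV
 * is sticky (never cleared by i915_reset_error_state() above); any other
 * error lasts until the error state is next reset.
 */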
void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}