/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

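/*
 * Error capture runs in reclaim-sensitive contexts, so allocations are
 * best-effort: allow kswapd reclaim and a bounded retry, but never warn
 * or invoke the OOM killer if the memory cannot be found.
 */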
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

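/*
 * The error state text is accumulated into a chain of scatterlist segments,
 * each entry pointing at a kmalloc'd buffer. sg->dma_address is not used for
 * DMA here; instead it records the logical offset of the buffer within the
 * overall dump, so that a partial read can be resumed cheaply.
 */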
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned int len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

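/*
 * Two interchangeable capture backends: with CONFIG_DRM_I915_COMPRESS_ERROR
 * each page is deflated through zlib before being appended to the coredump,
 * otherwise pages are copied verbatim. err_compression_marker() records
 * which encoding was used (':' for zlib, '~' for raw) so that readers of
 * the ascii85 stream can pick the matching decoder.
 */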
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

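/*
 * Deflate one page of GPU memory into dst. Reads from write-combining
 * memory are painfully slow, so when a staging page (c->tmp) is available
 * the source is first pulled into it with i915_memcpy_from_wc() and
 * compressed from there.
 */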
static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
	err_printf(m, "  context timeline seqno %u\n", ctx->hwsp_seqno);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

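/*
 * Emit the captured contents of a single vma as an ascii85 stream: a one
 * line header naming the owner and GTT offset, the compression marker, and
 * then every captured page in order, with the unused tail of the final
 * page trimmed away.
 */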
void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_cs *engine,
			       const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}

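/*
 * Tear down the scatterlist built by err_print_to_sgl(): free each
 * kmalloc'd text buffer, then follow the chain link (if any) and release
 * the page backing the exhausted sg table itself.
 */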
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (ee->guc_capture_node)
			intel_guc_capture_print_engine_node(m, ee);
		else
			error_print_engine(m, ee);

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * GuC dumped global, eng-class and eng-instance registers together
		 * as part of engine state dump so we print in err_print_gt_engines
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

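/*
 * Copy a byte range of the rendered error state into a caller-supplied
 * buffer, pread() style. The logical file offset stashed in each
 * sg->dma_address lets the walk skip ahead cheaply, and the segment that
 * satisfied the last read is cached in error->fit so that sequential
 * reads (e.g. of the sysfs "error" attribute) resume where they left off.
 */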
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

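/*
 * Snapshot the pages backing a vma into a freshly allocated coredump node.
 * Three paths are tried in turn: reading through the reserved GGTT
 * error-capture slot (which works for any backing store and bypasses the
 * CPU caches), reading local memory through its iomap, or, as a last
 * resort, clflushing and kmapping system pages directly.
 */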
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 I915_CACHE_NONE, 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
			ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
							      XEHP_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

static bool record_context(struct i915_gem_context_coredump *e,
			   struct intel_context *ce)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(ce->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
	e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
				*ce->timeline->hwsp_seqno : ~0U;

	e->total_runtime = intel_context_get_total_runtime_ns(ce);
	e->avg_runtime = intel_context_get_avg_runtime_ns(ce);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

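/*
 * Vmas of interest are collected onto this list at hang time, while the
 * engine is still frozen: each entry takes a reference and a resource hold
 * on its vma so that the backing pages cannot be reused before
 * intel_engine_coredump_add_vma() has copied them out.
 */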
struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};

static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}

static struct intel_engine_capture_vma *
engine_coredump_add_context(struct intel_engine_coredump *ee,
			    struct intel_context *ce,
			    gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, ce);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
	vma = capture_vma(vma, ce->state, "HW context", gfp);

	return vma;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma;

	vma = engine_coredump_add_context(ee, rq->context, gfp);
	if (!vma)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}

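/*
 * Capture everything of interest for one engine: allocate the coredump
 * node, look up the context/request blamed for the hang, snapshot that
 * request's vmas and, for GuC submission, attach the matching GuC-captured
 * register node. Returns NULL if nothing worth keeping was found.
 */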
static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress,
	       u32 dump_flags)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct intel_context *ce = NULL;
	struct i915_request *rq = NULL;

	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
	if (!ee)
		return NULL;

	intel_engine_get_hung_entity(engine, &ce, &rq);
	if (rq && !i915_request_started(rq))
		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
			 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);

	if (rq) {
		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
		i915_request_put(rq);
	} else if (ce) {
		capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
	}

	if (capture) {
		intel_engine_coredump_add_vma(ee, capture, compress);

		if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
			intel_guc_capture_get_matching_node(engine->gt, ee, ce);
	} else {
		kfree(ee);
		ee = NULL;
	}

	return ee;
}

static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress,
		  u32 dump_flags)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress, dump_flags);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
				intel_guc_capture_free_node(ee);
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
			      const struct intel_guc_ct_buffer *ctb,
			      const void *blob_ptr, struct intel_guc *guc)
{
	if (!ctb || !ctb->desc)
		return;

	saved->raw_status = ctb->desc->status;
	saved->raw_head = ctb->desc->head;
	saved->raw_tail = ctb->desc->tail;
	saved->head = ctb->head;
	saved->tail = ctb->tail;
	saved->size = ctb->size;
	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
}

static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
	error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);

	/*
	 * Save the GuC log and include a timestamp reference for converting the
	 * log times to system times (in conjunction with the error->boottime and
	 * gt->clock_frequency fields saved elsewhere).
	 */
	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
						    "GuC log buffer", compress);
	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
						    "GuC CT buffer", compress);
	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);

	return error_uc;
}

/* Capture display registers. */
static void gt_record_display_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;

	if (GRAPHICS_VER(i915) >= 6)
		gt->derrmr = intel_uncore_read(uncore, DERRMR);

	if (GRAPHICS_VER(i915) >= 8)
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
	else if (IS_VALLEYVIEW(i915))
		gt->ier = intel_uncore_read(uncore, VLV_IER);
	else if (HAS_PCH_SPLIT(i915))
		gt->ier = intel_uncore_read(uncore, DEIER);
	else if (GRAPHICS_VER(i915) == 2)
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	else
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
}

/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (GRAPHICS_VER(i915) >= 11) {
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (GRAPHICS_VER(i915) >= 8) {
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	}

	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}

/*
 * Capture all registers that relate to workload submission.
 * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us
 */
static void gt_record_global_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915))
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);

	if (GRAPHICS_VER(i915) == 7)
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (GRAPHICS_VER(i915) == 6) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (GRAPHICS_VER(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (GRAPHICS_VER(i915) >= 6) {
		if (GRAPHICS_VER(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GRAPHICS_VER(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GRAPHICS_VER(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (GRAPHICS_VER(i915) == 12)
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (GRAPHICS_VER(i915) >= 12) {
		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}
}

static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
	gt->clock_frequency = gt->_gt->clock_frequency;
	gt->clock_period_ns = gt->_gt->clock_period_ns;
}

/*
 * Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, IPEHR also captures
	 * some very common synchronization commands which almost always
	 * appear in cases that are strictly a client bug. Use instdone to
	 * differentiate some of those cases.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}

static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			GRAPHICS_VER(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}

static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = i915_vtd_active(i915);
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}

struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_display_regs(gc);
	gt_record_global_nonguc_regs(gc);

	/*
	 * GuC dumps global, eng-class and eng-instance registers
	 * (that can change as part of engine state during execution)
	 * before an engine is reset due to a hung context.
	 * GuC captures and reports all three groups of registers
	 * together as a single set before the engine is reset.
	 * Thus, if GuC triggered the context reset we retrieve
	 * the register values as part of gt_record_engines.
	 */
	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
		gt_record_global_regs(gc);

	gt_record_fences(gc);

	return gc;
}

struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}

void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}

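/*
 * Assemble a complete coredump: device-wide state first, then (if the GT
 * snapshot could be allocated) uC firmware state, per-engine captures and
 * finally the overlay. Returns an ERR_PTR if capture has been disabled or
 * the top-level structures could not be allocated.
 */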
static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		if (INTEL_INFO(i915)->has_gt_uc) {
			error->gt->uc = gt_record_uc(error->gt, compress);
			if (error->gt->uc) {
				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
					error->gt->uc->guc.is_guc_capture = true;
				else
					GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
			}
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress, dump_flags);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);

	return error;
}

struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	static DEFINE_MUTEX(capture_mutex);
	int ret = mutex_lock_interruptible(&capture_mutex);
	struct i915_gpu_coredump *dump;

	if (ret)
		return ERR_PTR(ret);

	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
	mutex_unlock(&capture_mutex);

	return dump;
}

void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 * @dump_flags: dump flags
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}

struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}

void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}