1 /*
2 * Copyright (c) 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 * Mika Kuoppala <mika.kuoppala@intel.com>
27 *
28 */
29
30 #include <linux/ascii85.h>
31 #include <linux/nmi.h>
32 #include <linux/pagevec.h>
33 #include <linux/scatterlist.h>
34 #include <linux/utsname.h>
35 #include <linux/zlib.h>
36
37 #include <drm/drm_print.h>
38
39 #include "display/intel_dmc.h"
40 #include "display/intel_overlay.h"
41
42 #include "gem/i915_gem_context.h"
43 #include "gem/i915_gem_lmem.h"
44 #include "gt/intel_gt.h"
45 #include "gt/intel_gt_pm.h"
46
47 #include "i915_drv.h"
48 #include "i915_gpu_error.h"
49 #include "i915_memcpy.h"
50 #include "i915_scatterlist.h"
51
52 #define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
53 #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
54
55 static void __sg_set_buf(struct scatterlist *sg,
56 void *addr, unsigned int len, loff_t it)
57 {
58 sg->page_link = (unsigned long)virt_to_page(addr);
59 sg->offset = offset_in_page(addr);
60 sg->length = len;
61 sg->dma_address = it;
62 }
63
64 static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
65 {
66 if (!len)
67 return false;
68
69 if (e->bytes + len + 1 <= e->size)
70 return true;
71
72 if (e->bytes) {
73 __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
74 e->iter += e->bytes;
75 e->buf = NULL;
76 e->bytes = 0;
77 }
78
79 if (e->cur == e->end) {
80 struct scatterlist *sgl;
81
82 sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
83 if (!sgl) {
84 e->err = -ENOMEM;
85 return false;
86 }
87
88 if (e->cur) {
89 e->cur->offset = 0;
90 e->cur->length = 0;
91 e->cur->page_link =
92 (unsigned long)sgl | SG_CHAIN;
93 } else {
94 e->sgl = sgl;
95 }
96
97 e->cur = sgl;
98 e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
99 }
100
101 e->size = ALIGN(len + 1, SZ_64K);
102 e->buf = kmalloc(e->size, ALLOW_FAIL);
103 if (!e->buf) {
104 e->size = PAGE_ALIGN(len + 1);
105 e->buf = kmalloc(e->size, GFP_KERNEL);
106 }
107 if (!e->buf) {
108 e->err = -ENOMEM;
109 return false;
110 }
111
112 return true;
113 }
114
115 __printf(2, 0)
116 static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
117 const char *fmt, va_list args)
118 {
119 va_list ap;
120 int len;
121
122 if (e->err)
123 return;
124
125 va_copy(ap, args);
126 len = vsnprintf(NULL, 0, fmt, ap);
127 va_end(ap);
128 if (len <= 0) {
129 e->err = len;
130 return;
131 }
132
133 if (!__i915_error_grow(e, len))
134 return;
135
136 GEM_BUG_ON(e->bytes >= e->size);
137 len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
138 if (len < 0) {
139 e->err = len;
140 return;
141 }
142 e->bytes += len;
143 }
144
145 static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
146 {
147 unsigned len;
148
149 if (e->err || !str)
150 return;
151
152 len = strlen(str);
153 if (!__i915_error_grow(e, len))
154 return;
155
156 GEM_BUG_ON(e->bytes + len > e->size);
157 memcpy(e->buf + e->bytes, str, len);
158 e->bytes += len;
159 }
160
161 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
162 #define err_puts(e, s) i915_error_puts(e, s)
163
164 static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
165 {
166 i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
167 }
168
169 static inline struct drm_printer
170 i915_error_printer(struct drm_i915_error_state_buf *e)
171 {
172 struct drm_printer p = {
173 .printfn = __i915_printfn_error,
174 .arg = e,
175 };
176 return p;
177 }
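
/*
 * Rough usage sketch (not a verbatim excerpt from this file): error text
 * is streamed either through err_printf()/err_puts() directly, or through
 * a drm_printer when reusing the generic intel_*_print() helpers, e.g.
 *
 *	struct drm_printer p = i915_error_printer(m);
 *	intel_device_info_print_static(&error->device_info, &p);
 *
 * Both paths funnel into i915_error_vprintf(), which grows the backing
 * buffer via __i915_error_grow() and records each filled chunk as a
 * scatterlist entry whose dma_address holds the logical file offset.
 */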
178
179 /* single threaded page allocator with a reserved stash for emergencies */
180 static void pool_fini(struct pagevec *pv)
181 {
182 pagevec_release(pv);
183 }
184
185 static int pool_refill(struct pagevec *pv, gfp_t gfp)
186 {
187 while (pagevec_space(pv)) {
188 struct page *p;
189
190 p = alloc_page(gfp);
191 if (!p)
192 return -ENOMEM;
193
194 pagevec_add(pv, p);
195 }
196
197 return 0;
198 }
199
200 static int pool_init(struct pagevec *pv, gfp_t gfp)
201 {
202 int err;
203
204 pagevec_init(pv);
205
206 err = pool_refill(pv, gfp);
207 if (err)
208 pool_fini(pv);
209
210 return err;
211 }
212
213 static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
214 {
215 struct page *p;
216
217 p = alloc_page(gfp);
218 if (!p && pagevec_count(pv))
219 p = pv->pages[--pv->nr];
220
221 return p ? page_address(p) : NULL;
222 }
223
224 static void pool_free(struct pagevec *pv, void *addr)
225 {
226 struct page *p = virt_to_page(addr);
227
228 if (pagevec_space(pv))
229 pagevec_add(pv, p);
230 else
231 __free_page(p);
232 }
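
/*
 * Sketch of the pool semantics above: pool_init() pre-fills a small stash
 * of pages so that a later pool_alloc() under memory pressure (or with
 * ATOMIC_MAYFAIL from atomic context) can still hand out a page from the
 * reserve; pool_free() tops the stash back up before returning pages to
 * the system. The stash size is simply whatever one pagevec holds.
 */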
233
234 #ifdef CONFIG_DRM_I915_COMPRESS_ERROR
235
236 struct i915_vma_compress {
237 struct pagevec pool;
238 struct z_stream_s zstream;
239 void *tmp;
240 };
241
242 static bool compress_init(struct i915_vma_compress *c)
243 {
244 struct z_stream_s *zstream = &c->zstream;
245
246 if (pool_init(&c->pool, ALLOW_FAIL))
247 return false;
248
249 zstream->workspace =
250 kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
251 ALLOW_FAIL);
252 if (!zstream->workspace) {
253 pool_fini(&c->pool);
254 return false;
255 }
256
257 c->tmp = NULL;
258 if (i915_has_memcpy_from_wc())
259 c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
260
261 return true;
262 }
263
264 static bool compress_start(struct i915_vma_compress *c)
265 {
266 struct z_stream_s *zstream = &c->zstream;
267 void *workspace = zstream->workspace;
268
269 memset(zstream, 0, sizeof(*zstream));
270 zstream->workspace = workspace;
271
272 return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
273 }
274
275 static void *compress_next_page(struct i915_vma_compress *c,
276 struct i915_vma_coredump *dst)
277 {
278 void *page;
279
280 if (dst->page_count >= dst->num_pages)
281 return ERR_PTR(-ENOSPC);
282
283 page = pool_alloc(&c->pool, ALLOW_FAIL);
284 if (!page)
285 return ERR_PTR(-ENOMEM);
286
287 return dst->pages[dst->page_count++] = page;
288 }
289
290 static int compress_page(struct i915_vma_compress *c,
291 void *src,
292 struct i915_vma_coredump *dst,
293 bool wc)
294 {
295 struct z_stream_s *zstream = &c->zstream;
296
297 zstream->next_in = src;
298 if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
299 zstream->next_in = c->tmp;
300 zstream->avail_in = PAGE_SIZE;
301
302 do {
303 if (zstream->avail_out == 0) {
304 zstream->next_out = compress_next_page(c, dst);
305 if (IS_ERR(zstream->next_out))
306 return PTR_ERR(zstream->next_out);
307
308 zstream->avail_out = PAGE_SIZE;
309 }
310
311 if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
312 return -EIO;
313
314 cond_resched();
315 } while (zstream->avail_in);
316
317 /* Fallback to uncompressed if we increase size? */
318 if (0 && zstream->total_out > zstream->total_in)
319 return -E2BIG;
320
321 return 0;
322 }
323
324 static int compress_flush(struct i915_vma_compress *c,
325 struct i915_vma_coredump *dst)
326 {
327 struct z_stream_s *zstream = &c->zstream;
328
329 do {
330 switch (zlib_deflate(zstream, Z_FINISH)) {
331 case Z_OK: /* more space requested */
332 zstream->next_out = compress_next_page(c, dst);
333 if (IS_ERR(zstream->next_out))
334 return PTR_ERR(zstream->next_out);
335
336 zstream->avail_out = PAGE_SIZE;
337 break;
338
339 case Z_STREAM_END:
340 goto end;
341
342 default: /* any error */
343 return -EIO;
344 }
345 } while (1);
346
347 end:
348 memset(zstream->next_out, 0, zstream->avail_out);
349 dst->unused = zstream->avail_out;
350 return 0;
351 }
352
353 static void compress_finish(struct i915_vma_compress *c)
354 {
355 zlib_deflateEnd(&c->zstream);
356 }
357
358 static void compress_fini(struct i915_vma_compress *c)
359 {
360 kfree(c->zstream.workspace);
361 if (c->tmp)
362 pool_free(&c->pool, c->tmp);
363 pool_fini(&c->pool);
364 }
365
366 static void err_compression_marker(struct drm_i915_error_state_buf *m)
367 {
368 err_puts(m, ":");
369 }
370
371 #else
372
373 struct i915_vma_compress {
374 struct pagevec pool;
375 };
376
377 static bool compress_init(struct i915_vma_compress *c)
378 {
379 return pool_init(&c->pool, ALLOW_FAIL) == 0;
380 }
381
382 static bool compress_start(struct i915_vma_compress *c)
383 {
384 return true;
385 }
386
387 static int compress_page(struct i915_vma_compress *c,
388 void *src,
389 struct i915_vma_coredump *dst,
390 bool wc)
391 {
392 void *ptr;
393
394 ptr = pool_alloc(&c->pool, ALLOW_FAIL);
395 if (!ptr)
396 return -ENOMEM;
397
398 if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
399 memcpy(ptr, src, PAGE_SIZE);
400 dst->pages[dst->page_count++] = ptr;
401 cond_resched();
402
403 return 0;
404 }
405
406 static int compress_flush(struct i915_vma_compress *c,
407 struct i915_vma_coredump *dst)
408 {
409 return 0;
410 }
411
412 static void compress_finish(struct i915_vma_compress *c)
413 {
414 }
415
416 static void compress_fini(struct i915_vma_compress *c)
417 {
418 pool_fini(&c->pool);
419 }
420
421 static void err_compression_marker(struct drm_i915_error_state_buf *m)
422 {
423 err_puts(m, "~");
424 }
425
426 #endif
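
/*
 * Both variants above expose the same interface to the capture code:
 * compress_init()/compress_start() set up per-capture state,
 * compress_page() appends one PAGE_SIZE chunk to an i915_vma_coredump,
 * and compress_flush()/compress_finish()/compress_fini() wind it down.
 * The marker from err_compression_marker() (':' for zlib, '~' for raw
 * pages) lets a decoder tell how to interpret the ascii85 stream that
 * follows each "--- <name>" header in the error dump.
 */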
427
428 static void error_print_instdone(struct drm_i915_error_state_buf *m,
429 const struct intel_engine_coredump *ee)
430 {
431 const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
432 int slice;
433 int subslice;
434
435 err_printf(m, " INSTDONE: 0x%08x\n",
436 ee->instdone.instdone);
437
438 if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
439 return;
440
441 err_printf(m, " SC_INSTDONE: 0x%08x\n",
442 ee->instdone.slice_common);
443
444 if (GRAPHICS_VER(m->i915) <= 6)
445 return;
446
447 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
448 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
449 slice, subslice,
450 ee->instdone.sampler[slice][subslice]);
451
452 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
453 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
454 slice, subslice,
455 ee->instdone.row[slice][subslice]);
456
457 if (GRAPHICS_VER(m->i915) < 12)
458 return;
459
460 err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
461 ee->instdone.slice_common_extra[0]);
462 err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
463 ee->instdone.slice_common_extra[1]);
464 }
465
466 static void error_print_request(struct drm_i915_error_state_buf *m,
467 const char *prefix,
468 const struct i915_request_coredump *erq)
469 {
470 if (!erq->seqno)
471 return;
472
473 err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
474 prefix, erq->pid, erq->context, erq->seqno,
475 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
476 &erq->flags) ? "!" : "",
477 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
478 &erq->flags) ? "+" : "",
479 erq->sched_attr.priority,
480 erq->head, erq->tail);
481 }
482
483 static void error_print_context(struct drm_i915_error_state_buf *m,
484 const char *header,
485 const struct i915_gem_context_coredump *ctx)
486 {
487 const u32 period = m->i915->gt.clock_period_ns;
488
489 err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
490 header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
491 ctx->guilty, ctx->active,
492 ctx->total_runtime * period,
493 mul_u32_u32(ctx->avg_runtime, period));
494 }
495
496 static struct i915_vma_coredump *
497 __find_vma(struct i915_vma_coredump *vma, const char *name)
498 {
499 while (vma) {
500 if (strcmp(vma->name, name) == 0)
501 return vma;
502 vma = vma->next;
503 }
504
505 return NULL;
506 }
507
508 static struct i915_vma_coredump *
509 find_batch(const struct intel_engine_coredump *ee)
510 {
511 return __find_vma(ee->vma, "batch");
512 }
513
514 static void error_print_engine(struct drm_i915_error_state_buf *m,
515 const struct intel_engine_coredump *ee)
516 {
517 struct i915_vma_coredump *batch;
518 int n;
519
520 err_printf(m, "%s command stream:\n", ee->engine->name);
521 err_printf(m, " CCID: 0x%08x\n", ee->ccid);
522 err_printf(m, " START: 0x%08x\n", ee->start);
523 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
524 err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
525 ee->tail, ee->rq_post, ee->rq_tail);
526 err_printf(m, " CTL: 0x%08x\n", ee->ctl);
527 err_printf(m, " MODE: 0x%08x\n", ee->mode);
528 err_printf(m, " HWS: 0x%08x\n", ee->hws);
529 err_printf(m, " ACTHD: 0x%08x %08x\n",
530 (u32)(ee->acthd>>32), (u32)ee->acthd);
531 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
532 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
533 err_printf(m, " ESR: 0x%08x\n", ee->esr);
534
535 error_print_instdone(m, ee);
536
537 batch = find_batch(ee);
538 if (batch) {
539 u64 start = batch->gtt_offset;
540 u64 end = start + batch->gtt_size;
541
542 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
543 upper_32_bits(start), lower_32_bits(start),
544 upper_32_bits(end), lower_32_bits(end));
545 }
546 if (GRAPHICS_VER(m->i915) >= 4) {
547 err_printf(m, " BBADDR: 0x%08x_%08x\n",
548 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
549 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
550 err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
551 }
552 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
553 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
554 lower_32_bits(ee->faddr));
555 if (GRAPHICS_VER(m->i915) >= 6) {
556 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
557 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
558 }
559 if (HAS_PPGTT(m->i915)) {
560 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
561
562 if (GRAPHICS_VER(m->i915) >= 8) {
563 int i;
564 for (i = 0; i < 4; i++)
565 err_printf(m, " PDP%d: 0x%016llx\n",
566 i, ee->vm_info.pdp[i]);
567 } else {
568 err_printf(m, " PP_DIR_BASE: 0x%08x\n",
569 ee->vm_info.pp_dir_base);
570 }
571 }
572 err_printf(m, " hung: %u\n", ee->hung);
573 err_printf(m, " engine reset count: %u\n", ee->reset_count);
574
575 for (n = 0; n < ee->num_ports; n++) {
576 err_printf(m, " ELSP[%d]:", n);
577 error_print_request(m, " ", &ee->execlist[n]);
578 }
579
580 error_print_context(m, " Active context: ", &ee->context);
581 }
582
583 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
584 {
585 va_list args;
586
587 va_start(args, f);
588 i915_error_vprintf(e, f, args);
589 va_end(args);
590 }
591
592 static void print_error_vma(struct drm_i915_error_state_buf *m,
593 const struct intel_engine_cs *engine,
594 const struct i915_vma_coredump *vma)
595 {
596 char out[ASCII85_BUFSZ];
597 int page;
598
599 if (!vma)
600 return;
601
602 err_printf(m, "%s --- %s = 0x%08x %08x\n",
603 engine ? engine->name : "global", vma->name,
604 upper_32_bits(vma->gtt_offset),
605 lower_32_bits(vma->gtt_offset));
606
607 if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
608 err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
609
610 err_compression_marker(m);
611 for (page = 0; page < vma->page_count; page++) {
612 int i, len;
613
614 len = PAGE_SIZE;
615 if (page == vma->page_count - 1)
616 len -= vma->unused;
617 len = ascii85_encode_len(len);
618
619 for (i = 0; i < len; i++)
620 err_puts(m, ascii85_encode(vma->pages[page][i], out));
621 }
622 err_puts(m, "\n");
623 }
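
/*
 * A captured buffer therefore serialises roughly as:
 *
 *	<engine> --- <name> = 0x<hi> <lo>
 *	:<ascii85 of the (optionally zlib-compressed) pages>
 *
 * with the trailing 'unused' bytes of the final page dropped so that the
 * decoded size matches what was actually captured.
 */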
624
625 static void err_print_capabilities(struct drm_i915_error_state_buf *m,
626 struct i915_gpu_coredump *error)
627 {
628 struct drm_printer p = i915_error_printer(m);
629
630 intel_device_info_print_static(&error->device_info, &p);
631 intel_device_info_print_runtime(&error->runtime_info, &p);
632 intel_driver_caps_print(&error->driver_caps, &p);
633 }
634
635 static void err_print_params(struct drm_i915_error_state_buf *m,
636 const struct i915_params *params)
637 {
638 struct drm_printer p = i915_error_printer(m);
639
640 i915_params_dump(params, &p);
641 }
642
643 static void err_print_pciid(struct drm_i915_error_state_buf *m,
644 struct drm_i915_private *i915)
645 {
646 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
647
648 err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
649 err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
650 err_printf(m, "PCI Subsystem: %04x:%04x\n",
651 pdev->subsystem_vendor,
652 pdev->subsystem_device);
653 }
654
655 static void err_print_uc(struct drm_i915_error_state_buf *m,
656 const struct intel_uc_coredump *error_uc)
657 {
658 struct drm_printer p = i915_error_printer(m);
659
660 intel_uc_fw_dump(&error_uc->guc_fw, &p);
661 intel_uc_fw_dump(&error_uc->huc_fw, &p);
662 print_error_vma(m, NULL, error_uc->guc_log);
663 }
664
665 static void err_free_sgl(struct scatterlist *sgl)
666 {
667 while (sgl) {
668 struct scatterlist *sg;
669
670 for (sg = sgl; !sg_is_chain(sg); sg++) {
671 kfree(sg_virt(sg));
672 if (sg_is_last(sg))
673 break;
674 }
675
676 sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
677 free_page((unsigned long)sgl);
678 sgl = sg;
679 }
680 }
681
682 static void err_print_gt_info(struct drm_i915_error_state_buf *m,
683 struct intel_gt_coredump *gt)
684 {
685 struct drm_printer p = i915_error_printer(m);
686
687 intel_gt_info_print(&gt->info, &p);
688 intel_sseu_print_topology(&gt->info.sseu, &p);
689 }
690
691 static void err_print_gt(struct drm_i915_error_state_buf *m,
692 struct intel_gt_coredump *gt)
693 {
694 const struct intel_engine_coredump *ee;
695 int i;
696
697 err_printf(m, "GT awake: %s\n", yesno(gt->awake));
698 err_printf(m, "EIR: 0x%08x\n", gt->eir);
699 err_printf(m, "IER: 0x%08x\n", gt->ier);
700 for (i = 0; i < gt->ngtier; i++)
701 err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
702 err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
703 err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
704 err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
705
706 for (i = 0; i < gt->nfence; i++)
707 err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
708
709 if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
710 err_printf(m, "ERROR: 0x%08x\n", gt->error);
711 err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
712 }
713
714 if (GRAPHICS_VER(m->i915) >= 8)
715 err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
716 gt->fault_data1, gt->fault_data0);
717
718 if (GRAPHICS_VER(m->i915) == 7)
719 err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
720
721 if (IS_GRAPHICS_VER(m->i915, 8, 11))
722 err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
723
724 if (GRAPHICS_VER(m->i915) == 12)
725 err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
726
727 if (GRAPHICS_VER(m->i915) >= 12) {
728 int i;
729
730 for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
731 /*
732 * SFC_DONE resides in the VD forcewake domain, so it
733 * only exists if the corresponding VCS engine is
734 * present.
735 */
736 if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
737 continue;
738
739 err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
740 gt->sfc_done[i]);
741 }
742
743 err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
744 }
745
746 for (ee = gt->engine; ee; ee = ee->next) {
747 const struct i915_vma_coredump *vma;
748
749 error_print_engine(m, ee);
750 for (vma = ee->vma; vma; vma = vma->next)
751 print_error_vma(m, ee->engine, vma);
752 }
753
754 if (gt->uc)
755 err_print_uc(m, gt->uc);
756
757 err_print_gt_info(m, gt);
758 }
759
760 static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
761 struct i915_gpu_coredump *error)
762 {
763 const struct intel_engine_coredump *ee;
764 struct timespec64 ts;
765
766 if (*error->error_msg)
767 err_printf(m, "%s\n", error->error_msg);
768 err_printf(m, "Kernel: %s %s\n",
769 init_utsname()->release,
770 init_utsname()->machine);
771 err_printf(m, "Driver: %s\n", DRIVER_DATE);
772 ts = ktime_to_timespec64(error->time);
773 err_printf(m, "Time: %lld s %ld us\n",
774 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
775 ts = ktime_to_timespec64(error->boottime);
776 err_printf(m, "Boottime: %lld s %ld us\n",
777 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
778 ts = ktime_to_timespec64(error->uptime);
779 err_printf(m, "Uptime: %lld s %ld us\n",
780 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
781 err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
782 error->capture, jiffies_to_msecs(jiffies - error->capture));
783
784 for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
785 err_printf(m, "Active process (on ring %s): %s [%d]\n",
786 ee->engine->name,
787 ee->context.comm,
788 ee->context.pid);
789
790 err_printf(m, "Reset count: %u\n", error->reset_count);
791 err_printf(m, "Suspend count: %u\n", error->suspend_count);
792 err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
793 err_printf(m, "Subplatform: 0x%x\n",
794 intel_subplatform(&error->runtime_info,
795 error->device_info.platform));
796 err_print_pciid(m, m->i915);
797
798 err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
799
800 if (HAS_DMC(m->i915)) {
801 struct intel_dmc *dmc = &m->i915->dmc;
802
803 err_printf(m, "DMC loaded: %s\n",
804 yesno(intel_dmc_has_payload(m->i915) != 0));
805 err_printf(m, "DMC fw version: %d.%d\n",
806 DMC_VERSION_MAJOR(dmc->version),
807 DMC_VERSION_MINOR(dmc->version));
808 }
809
810 err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
811 err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
812
813 if (error->gt)
814 err_print_gt(m, error->gt);
815
816 if (error->overlay)
817 intel_overlay_print_error_state(m, error->overlay);
818
819 err_print_capabilities(m, error);
820 err_print_params(m, &error->params);
821 }
822
823 static int err_print_to_sgl(struct i915_gpu_coredump *error)
824 {
825 struct drm_i915_error_state_buf m;
826
827 if (IS_ERR(error))
828 return PTR_ERR(error);
829
830 if (READ_ONCE(error->sgl))
831 return 0;
832
833 memset(&m, 0, sizeof(m));
834 m.i915 = error->i915;
835
836 __err_print_to_sgl(&m, error);
837
838 if (m.buf) {
839 __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
840 m.bytes = 0;
841 m.buf = NULL;
842 }
843 if (m.cur) {
844 GEM_BUG_ON(m.end < m.cur);
845 sg_mark_end(m.cur - 1);
846 }
847 GEM_BUG_ON(m.sgl && !m.cur);
848
849 if (m.err) {
850 err_free_sgl(m.sgl);
851 return m.err;
852 }
853
854 if (cmpxchg(&error->sgl, NULL, m.sgl))
855 err_free_sgl(m.sgl);
856
857 return 0;
858 }
859
860 ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
861 char *buf, loff_t off, size_t rem)
862 {
863 struct scatterlist *sg;
864 size_t count;
865 loff_t pos;
866 int err;
867
868 if (!error || !rem)
869 return 0;
870
871 err = err_print_to_sgl(error);
872 if (err)
873 return err;
874
875 sg = READ_ONCE(error->fit);
876 if (!sg || off < sg->dma_address)
877 sg = error->sgl;
878 if (!sg)
879 return 0;
880
881 pos = sg->dma_address;
882 count = 0;
883 do {
884 size_t len, start;
885
886 if (sg_is_chain(sg)) {
887 sg = sg_chain_ptr(sg);
888 GEM_BUG_ON(sg_is_chain(sg));
889 }
890
891 len = sg->length;
892 if (pos + len <= off) {
893 pos += len;
894 continue;
895 }
896
897 start = sg->offset;
898 if (pos < off) {
899 GEM_BUG_ON(off - pos > len);
900 len -= off - pos;
901 start += off - pos;
902 pos = off;
903 }
904
905 len = min(len, rem);
906 GEM_BUG_ON(!len || len > sg->length);
907
908 memcpy(buf, page_address(sg_page(sg)) + start, len);
909
910 count += len;
911 pos += len;
912
913 buf += len;
914 rem -= len;
915 if (!rem) {
916 WRITE_ONCE(error->fit, sg);
917 break;
918 }
919 } while (!sg_is_last(sg++));
920
921 return count;
922 }
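
/*
 * Note the trick above: sg->dma_address is (ab)used to store the logical
 * offset of each chunk within the rendered error file, and error->fit
 * caches the scatterlist entry we stopped at, so a reader that consumes
 * the dump sequentially does not rescan the chain on every call.
 */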
923
924 static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
925 {
926 while (vma) {
927 struct i915_vma_coredump *next = vma->next;
928 int page;
929
930 for (page = 0; page < vma->page_count; page++)
931 free_page((unsigned long)vma->pages[page]);
932
933 kfree(vma);
934 vma = next;
935 }
936 }
937
938 static void cleanup_params(struct i915_gpu_coredump *error)
939 {
940 i915_params_free(&error->params);
941 }
942
943 static void cleanup_uc(struct intel_uc_coredump *uc)
944 {
945 kfree(uc->guc_fw.path);
946 kfree(uc->huc_fw.path);
947 i915_vma_coredump_free(uc->guc_log);
948
949 kfree(uc);
950 }
951
952 static void cleanup_gt(struct intel_gt_coredump *gt)
953 {
954 while (gt->engine) {
955 struct intel_engine_coredump *ee = gt->engine;
956
957 gt->engine = ee->next;
958
959 i915_vma_coredump_free(ee->vma);
960 kfree(ee);
961 }
962
963 if (gt->uc)
964 cleanup_uc(gt->uc);
965
966 kfree(gt);
967 }
968
969 void __i915_gpu_coredump_free(struct kref *error_ref)
970 {
971 struct i915_gpu_coredump *error =
972 container_of(error_ref, typeof(*error), ref);
973
974 while (error->gt) {
975 struct intel_gt_coredump *gt = error->gt;
976
977 error->gt = gt->next;
978 cleanup_gt(gt);
979 }
980
981 kfree(error->overlay);
982
983 cleanup_params(error);
984
985 err_free_sgl(error->sgl);
986 kfree(error);
987 }
988
989 static struct i915_vma_coredump *
990 i915_vma_coredump_create(const struct intel_gt *gt,
991 const struct i915_vma *vma,
992 const char *name,
993 struct i915_vma_compress *compress)
994 {
995 struct i915_ggtt *ggtt = gt->ggtt;
996 const u64 slot = ggtt->error_capture.start;
997 struct i915_vma_coredump *dst;
998 unsigned long num_pages;
999 struct sgt_iter iter;
1000 int ret;
1001
1002 might_sleep();
1003
1004 if (!vma || !vma->pages || !compress)
1005 return NULL;
1006
1007 num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
1008 num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
1009 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
1010 if (!dst)
1011 return NULL;
1012
1013 if (!compress_start(compress)) {
1014 kfree(dst);
1015 return NULL;
1016 }
1017
1018 strcpy(dst->name, name);
1019 dst->next = NULL;
1020
1021 dst->gtt_offset = vma->node.start;
1022 dst->gtt_size = vma->node.size;
1023 dst->gtt_page_sizes = vma->page_sizes.gtt;
1024 dst->num_pages = num_pages;
1025 dst->page_count = 0;
1026 dst->unused = 0;
1027
1028 ret = -EINVAL;
1029 if (drm_mm_node_allocated(&ggtt->error_capture)) {
1030 void __iomem *s;
1031 dma_addr_t dma;
1032
1033 for_each_sgt_daddr(dma, iter, vma->pages) {
1034 mutex_lock(&ggtt->error_mutex);
1035 ggtt->vm.insert_page(&ggtt->vm, dma, slot,
1036 I915_CACHE_NONE, 0);
1037 mb();
1038
1039 s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
1040 ret = compress_page(compress,
1041 (void __force *)s, dst,
1042 true);
1043 io_mapping_unmap(s);
1044
1045 mb();
1046 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
1047 mutex_unlock(&ggtt->error_mutex);
1048 if (ret)
1049 break;
1050 }
1051 } else if (__i915_gem_object_is_lmem(vma->obj)) {
1052 struct intel_memory_region *mem = vma->obj->mm.region;
1053 dma_addr_t dma;
1054
1055 for_each_sgt_daddr(dma, iter, vma->pages) {
1056 void __iomem *s;
1057
1058 s = io_mapping_map_wc(&mem->iomap,
1059 dma - mem->region.start,
1060 PAGE_SIZE);
1061 ret = compress_page(compress,
1062 (void __force *)s, dst,
1063 true);
1064 io_mapping_unmap(s);
1065 if (ret)
1066 break;
1067 }
1068 } else {
1069 struct page *page;
1070
1071 for_each_sgt_page(page, iter, vma->pages) {
1072 void *s;
1073
1074 drm_clflush_pages(&page, 1);
1075
1076 s = kmap(page);
1077 ret = compress_page(compress, s, dst, false);
1078 kunmap(page);
1079
1080 drm_clflush_pages(&page, 1);
1081
1082 if (ret)
1083 break;
1084 }
1085 }
1086
1087 if (ret || compress_flush(compress, dst)) {
1088 while (dst->page_count--)
1089 pool_free(&compress->pool, dst->pages[dst->page_count]);
1090 kfree(dst);
1091 dst = NULL;
1092 }
1093 compress_finish(compress);
1094
1095 return dst;
1096 }
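
/*
 * Three capture paths are used above, depending on where the backing
 * store lives: if the reserved GGTT error_capture slot is available, each
 * page is bound into it in turn and read through a WC mapping; local
 * memory objects are read directly through the memory region's iomap;
 * anything else is kmap()'ed with clflushes on either side so the CPU
 * sees what the GPU last wrote.
 */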
1097
1098 static void gt_record_fences(struct intel_gt_coredump *gt)
1099 {
1100 struct i915_ggtt *ggtt = gt->_gt->ggtt;
1101 struct intel_uncore *uncore = gt->_gt->uncore;
1102 int i;
1103
1104 if (GRAPHICS_VER(uncore->i915) >= 6) {
1105 for (i = 0; i < ggtt->num_fences; i++)
1106 gt->fence[i] =
1107 intel_uncore_read64(uncore,
1108 FENCE_REG_GEN6_LO(i));
1109 } else if (GRAPHICS_VER(uncore->i915) >= 4) {
1110 for (i = 0; i < ggtt->num_fences; i++)
1111 gt->fence[i] =
1112 intel_uncore_read64(uncore,
1113 FENCE_REG_965_LO(i));
1114 } else {
1115 for (i = 0; i < ggtt->num_fences; i++)
1116 gt->fence[i] =
1117 intel_uncore_read(uncore, FENCE_REG(i));
1118 }
1119 gt->nfence = i;
1120 }
1121
1122 static void engine_record_registers(struct intel_engine_coredump *ee)
1123 {
1124 const struct intel_engine_cs *engine = ee->engine;
1125 struct drm_i915_private *i915 = engine->i915;
1126
1127 if (GRAPHICS_VER(i915) >= 6) {
1128 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
1129
1130 if (GRAPHICS_VER(i915) >= 12)
1131 ee->fault_reg = intel_uncore_read(engine->uncore,
1132 GEN12_RING_FAULT_REG);
1133 else if (GRAPHICS_VER(i915) >= 8)
1134 ee->fault_reg = intel_uncore_read(engine->uncore,
1135 GEN8_RING_FAULT_REG);
1136 else
1137 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
1138 }
1139
1140 if (GRAPHICS_VER(i915) >= 4) {
1141 ee->esr = ENGINE_READ(engine, RING_ESR);
1142 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
1143 ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
1144 ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
1145 ee->instps = ENGINE_READ(engine, RING_INSTPS);
1146 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
1147 ee->ccid = ENGINE_READ(engine, CCID);
1148 if (GRAPHICS_VER(i915) >= 8) {
1149 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
1150 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
1151 }
1152 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
1153 } else {
1154 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
1155 ee->ipeir = ENGINE_READ(engine, IPEIR);
1156 ee->ipehr = ENGINE_READ(engine, IPEHR);
1157 }
1158
1159 intel_engine_get_instdone(engine, &ee->instdone);
1160
1161 ee->instpm = ENGINE_READ(engine, RING_INSTPM);
1162 ee->acthd = intel_engine_get_active_head(engine);
1163 ee->start = ENGINE_READ(engine, RING_START);
1164 ee->head = ENGINE_READ(engine, RING_HEAD);
1165 ee->tail = ENGINE_READ(engine, RING_TAIL);
1166 ee->ctl = ENGINE_READ(engine, RING_CTL);
1167 if (GRAPHICS_VER(i915) > 2)
1168 ee->mode = ENGINE_READ(engine, RING_MI_MODE);
1169
1170 if (!HWS_NEEDS_PHYSICAL(i915)) {
1171 i915_reg_t mmio;
1172
1173 if (GRAPHICS_VER(i915) == 7) {
1174 switch (engine->id) {
1175 default:
1176 MISSING_CASE(engine->id);
1177 fallthrough;
1178 case RCS0:
1179 mmio = RENDER_HWS_PGA_GEN7;
1180 break;
1181 case BCS0:
1182 mmio = BLT_HWS_PGA_GEN7;
1183 break;
1184 case VCS0:
1185 mmio = BSD_HWS_PGA_GEN7;
1186 break;
1187 case VECS0:
1188 mmio = VEBOX_HWS_PGA_GEN7;
1189 break;
1190 }
1191 } else if (GRAPHICS_VER(engine->i915) == 6) {
1192 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
1193 } else {
1194 /* XXX: gen8 returns to sanity */
1195 mmio = RING_HWS_PGA(engine->mmio_base);
1196 }
1197
1198 ee->hws = intel_uncore_read(engine->uncore, mmio);
1199 }
1200
1201 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
1202
1203 if (HAS_PPGTT(i915)) {
1204 int i;
1205
1206 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
1207
1208 if (GRAPHICS_VER(i915) == 6) {
1209 ee->vm_info.pp_dir_base =
1210 ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
1211 } else if (GRAPHICS_VER(i915) == 7) {
1212 ee->vm_info.pp_dir_base =
1213 ENGINE_READ(engine, RING_PP_DIR_BASE);
1214 } else if (GRAPHICS_VER(i915) >= 8) {
1215 u32 base = engine->mmio_base;
1216
1217 for (i = 0; i < 4; i++) {
1218 ee->vm_info.pdp[i] =
1219 intel_uncore_read(engine->uncore,
1220 GEN8_RING_PDP_UDW(base, i));
1221 ee->vm_info.pdp[i] <<= 32;
1222 ee->vm_info.pdp[i] |=
1223 intel_uncore_read(engine->uncore,
1224 GEN8_RING_PDP_LDW(base, i));
1225 }
1226 }
1227 }
1228 }
1229
1230 static void record_request(const struct i915_request *request,
1231 struct i915_request_coredump *erq)
1232 {
1233 erq->flags = request->fence.flags;
1234 erq->context = request->fence.context;
1235 erq->seqno = request->fence.seqno;
1236 erq->sched_attr = request->sched.attr;
1237 erq->head = request->head;
1238 erq->tail = request->tail;
1239
1240 erq->pid = 0;
1241 rcu_read_lock();
1242 if (!intel_context_is_closed(request->context)) {
1243 const struct i915_gem_context *ctx;
1244
1245 ctx = rcu_dereference(request->context->gem_context);
1246 if (ctx)
1247 erq->pid = pid_nr(ctx->pid);
1248 }
1249 rcu_read_unlock();
1250 }
1251
1252 static void engine_record_execlists(struct intel_engine_coredump *ee)
1253 {
1254 const struct intel_engine_execlists * const el = &ee->engine->execlists;
1255 struct i915_request * const *port = el->active;
1256 unsigned int n = 0;
1257
1258 while (*port)
1259 record_request(*port++, &ee->execlist[n++]);
1260
1261 ee->num_ports = n;
1262 }
1263
1264 static bool record_context(struct i915_gem_context_coredump *e,
1265 const struct i915_request *rq)
1266 {
1267 struct i915_gem_context *ctx;
1268 struct task_struct *task;
1269 bool simulated;
1270
1271 rcu_read_lock();
1272 ctx = rcu_dereference(rq->context->gem_context);
1273 if (ctx && !kref_get_unless_zero(&ctx->ref))
1274 ctx = NULL;
1275 rcu_read_unlock();
1276 if (!ctx)
1277 return true;
1278
1279 rcu_read_lock();
1280 task = pid_task(ctx->pid, PIDTYPE_PID);
1281 if (task) {
1282 strcpy(e->comm, task->comm);
1283 e->pid = task->pid;
1284 }
1285 rcu_read_unlock();
1286
1287 e->sched_attr = ctx->sched;
1288 e->guilty = atomic_read(&ctx->guilty_count);
1289 e->active = atomic_read(&ctx->active_count);
1290
1291 e->total_runtime = rq->context->runtime.total;
1292 e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
1293
1294 simulated = i915_gem_context_no_error_capture(ctx);
1295
1296 i915_gem_context_put(ctx);
1297 return simulated;
1298 }
1299
1300 struct intel_engine_capture_vma {
1301 struct intel_engine_capture_vma *next;
1302 struct i915_vma *vma;
1303 char name[16];
1304 };
1305
1306 static struct intel_engine_capture_vma *
1307 capture_vma(struct intel_engine_capture_vma *next,
1308 struct i915_vma *vma,
1309 const char *name,
1310 gfp_t gfp)
1311 {
1312 struct intel_engine_capture_vma *c;
1313
1314 if (!vma)
1315 return next;
1316
1317 c = kmalloc(sizeof(*c), gfp);
1318 if (!c)
1319 return next;
1320
1321 if (!i915_active_acquire_if_busy(&vma->active)) {
1322 kfree(c);
1323 return next;
1324 }
1325
1326 strcpy(c->name, name);
1327 c->vma = vma; /* reference held while active */
1328
1329 c->next = next;
1330 return c;
1331 }
1332
1333 static struct intel_engine_capture_vma *
1334 capture_user(struct intel_engine_capture_vma *capture,
1335 const struct i915_request *rq,
1336 gfp_t gfp)
1337 {
1338 struct i915_capture_list *c;
1339
1340 for (c = rq->capture_list; c; c = c->next)
1341 capture = capture_vma(capture, c->vma, "user", gfp);
1342
1343 return capture;
1344 }
1345
1346 static void add_vma(struct intel_engine_coredump *ee,
1347 struct i915_vma_coredump *vma)
1348 {
1349 if (vma) {
1350 vma->next = ee->vma;
1351 ee->vma = vma;
1352 }
1353 }
1354
1355 struct intel_engine_coredump *
1356 intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
1357 {
1358 struct intel_engine_coredump *ee;
1359
1360 ee = kzalloc(sizeof(*ee), gfp);
1361 if (!ee)
1362 return NULL;
1363
1364 ee->engine = engine;
1365
1366 engine_record_registers(ee);
1367 engine_record_execlists(ee);
1368
1369 return ee;
1370 }
1371
1372 struct intel_engine_capture_vma *
1373 intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
1374 struct i915_request *rq,
1375 gfp_t gfp)
1376 {
1377 struct intel_engine_capture_vma *vma = NULL;
1378
1379 ee->simulated |= record_context(&ee->context, rq);
1380 if (ee->simulated)
1381 return NULL;
1382
1383 /*
1384 * We need to copy these to an anonymous buffer
1385 * as the simplest method to avoid being overwritten
1386 * by userspace.
1387 */
1388 vma = capture_vma(vma, rq->batch, "batch", gfp);
1389 vma = capture_user(vma, rq, gfp);
1390 vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
1391 vma = capture_vma(vma, rq->context->state, "HW context", gfp);
1392
1393 ee->rq_head = rq->head;
1394 ee->rq_post = rq->postfix;
1395 ee->rq_tail = rq->tail;
1396
1397 return vma;
1398 }
1399
1400 void
1401 intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
1402 struct intel_engine_capture_vma *capture,
1403 struct i915_vma_compress *compress)
1404 {
1405 const struct intel_engine_cs *engine = ee->engine;
1406
1407 while (capture) {
1408 struct intel_engine_capture_vma *this = capture;
1409 struct i915_vma *vma = this->vma;
1410
1411 add_vma(ee,
1412 i915_vma_coredump_create(engine->gt,
1413 vma, this->name,
1414 compress));
1415
1416 i915_active_release(&vma->active);
1417
1418 capture = this->next;
1419 kfree(this);
1420 }
1421
1422 add_vma(ee,
1423 i915_vma_coredump_create(engine->gt,
1424 engine->status_page.vma,
1425 "HW Status",
1426 compress));
1427
1428 add_vma(ee,
1429 i915_vma_coredump_create(engine->gt,
1430 engine->wa_ctx.vma,
1431 "WA context",
1432 compress));
1433 }
1434
1435 static struct intel_engine_coredump *
1436 capture_engine(struct intel_engine_cs *engine,
1437 struct i915_vma_compress *compress)
1438 {
1439 struct intel_engine_capture_vma *capture = NULL;
1440 struct intel_engine_coredump *ee;
1441 struct intel_context *ce;
1442 struct i915_request *rq = NULL;
1443 unsigned long flags;
1444
1445 ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
1446 if (!ee)
1447 return NULL;
1448
1449 ce = intel_engine_get_hung_context(engine);
1450 if (ce) {
1451 intel_engine_clear_hung_context(engine);
1452 rq = intel_context_find_active_request(ce);
1453 if (!rq || !i915_request_started(rq))
1454 goto no_request_capture;
1455 } else {
1456 /*
1457 * Getting here with GuC enabled means it is a forced error capture
1458 * with no actual hang. So, no need to attempt the execlist search.
1459 */
1460 if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
1461 spin_lock_irqsave(&engine->sched_engine->lock, flags);
1462 rq = intel_engine_execlist_find_hung_request(engine);
1463 spin_unlock_irqrestore(&engine->sched_engine->lock,
1464 flags);
1465 }
1466 }
1467 if (rq)
1468 capture = intel_engine_coredump_add_request(ee, rq,
1469 ATOMIC_MAYFAIL);
1470 if (!capture) {
1471 no_request_capture:
1472 kfree(ee);
1473 return NULL;
1474 }
1475
1476 intel_engine_coredump_add_vma(ee, capture, compress);
1477
1478 return ee;
1479 }
1480
1481 static void
1482 gt_record_engines(struct intel_gt_coredump *gt,
1483 intel_engine_mask_t engine_mask,
1484 struct i915_vma_compress *compress)
1485 {
1486 struct intel_engine_cs *engine;
1487 enum intel_engine_id id;
1488
1489 for_each_engine(engine, gt->_gt, id) {
1490 struct intel_engine_coredump *ee;
1491
1492 /* Refill our page pool before entering atomic section */
1493 pool_refill(&compress->pool, ALLOW_FAIL);
1494
1495 ee = capture_engine(engine, compress);
1496 if (!ee)
1497 continue;
1498
1499 ee->hung = engine->mask & engine_mask;
1500
1501 gt->simulated |= ee->simulated;
1502 if (ee->simulated) {
1503 kfree(ee);
1504 continue;
1505 }
1506
1507 ee->next = gt->engine;
1508 gt->engine = ee;
1509 }
1510 }
1511
1512 static struct intel_uc_coredump *
1513 gt_record_uc(struct intel_gt_coredump *gt,
1514 struct i915_vma_compress *compress)
1515 {
1516 const struct intel_uc *uc = &gt->_gt->uc;
1517 struct intel_uc_coredump *error_uc;
1518
1519 error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
1520 if (!error_uc)
1521 return NULL;
1522
1523 memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
1524 memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
1525
1526 /* Non-default firmware paths will be specified by the modparam.
1527 * As modparams are generally accessible from userspace, make
1528 * explicit copies of the firmware paths.
1529 */
1530 error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
1531 error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
1532 error_uc->guc_log =
1533 i915_vma_coredump_create(gt->_gt,
1534 uc->guc.log.vma, "GuC log buffer",
1535 compress);
1536
1537 return error_uc;
1538 }
1539
1540 /* Capture all registers which don't fit into another category. */
1541 static void gt_record_regs(struct intel_gt_coredump *gt)
1542 {
1543 struct intel_uncore *uncore = gt->_gt->uncore;
1544 struct drm_i915_private *i915 = uncore->i915;
1545 int i;
1546
1547 /*
1548 * General organization
1549 * 1. Registers specific to a single generation
1550 * 2. Registers which belong to multiple generations
1551 * 3. Feature specific registers.
1552 * 4. Everything else
1553 * Please try to follow the order.
1554 */
1555
1556 /* 1: Registers specific to a single generation */
1557 if (IS_VALLEYVIEW(i915)) {
1558 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1559 gt->ier = intel_uncore_read(uncore, VLV_IER);
1560 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
1561 }
1562
1563 if (GRAPHICS_VER(i915) == 7)
1564 gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
1565
1566 if (GRAPHICS_VER(i915) >= 12) {
1567 gt->fault_data0 = intel_uncore_read(uncore,
1568 GEN12_FAULT_TLB_DATA0);
1569 gt->fault_data1 = intel_uncore_read(uncore,
1570 GEN12_FAULT_TLB_DATA1);
1571 } else if (GRAPHICS_VER(i915) >= 8) {
1572 gt->fault_data0 = intel_uncore_read(uncore,
1573 GEN8_FAULT_TLB_DATA0);
1574 gt->fault_data1 = intel_uncore_read(uncore,
1575 GEN8_FAULT_TLB_DATA1);
1576 }
1577
1578 if (GRAPHICS_VER(i915) == 6) {
1579 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
1580 gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
1581 gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
1582 }
1583
1584 /* 2: Registers which belong to multiple generations */
1585 if (GRAPHICS_VER(i915) >= 7)
1586 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
1587
1588 if (GRAPHICS_VER(i915) >= 6) {
1589 gt->derrmr = intel_uncore_read(uncore, DERRMR);
1590 if (GRAPHICS_VER(i915) < 12) {
1591 gt->error = intel_uncore_read(uncore, ERROR_GEN6);
1592 gt->done_reg = intel_uncore_read(uncore, DONE_REG);
1593 }
1594 }
1595
1596 /* 3: Feature specific registers */
1597 if (IS_GRAPHICS_VER(i915, 6, 7)) {
1598 gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1599 gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
1600 }
1601
1602 if (IS_GRAPHICS_VER(i915, 8, 11))
1603 gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
1604
1605 if (GRAPHICS_VER(i915) == 12)
1606 gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
1607
1608 if (GRAPHICS_VER(i915) >= 12) {
1609 for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
1610 /*
1611 * SFC_DONE resides in the VD forcewake domain, so it
1612 * only exists if the corresponding VCS engine is
1613 * present.
1614 */
1615 if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
1616 continue;
1617
1618 gt->sfc_done[i] =
1619 intel_uncore_read(uncore, GEN12_SFC_DONE(i));
1620 }
1621
1622 gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
1623 }
1624
1625 /* 4: Everything else */
1626 if (GRAPHICS_VER(i915) >= 11) {
1627 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1628 gt->gtier[0] =
1629 intel_uncore_read(uncore,
1630 GEN11_RENDER_COPY_INTR_ENABLE);
1631 gt->gtier[1] =
1632 intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
1633 gt->gtier[2] =
1634 intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
1635 gt->gtier[3] =
1636 intel_uncore_read(uncore,
1637 GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1638 gt->gtier[4] =
1639 intel_uncore_read(uncore,
1640 GEN11_CRYPTO_RSVD_INTR_ENABLE);
1641 gt->gtier[5] =
1642 intel_uncore_read(uncore,
1643 GEN11_GUNIT_CSME_INTR_ENABLE);
1644 gt->ngtier = 6;
1645 } else if (GRAPHICS_VER(i915) >= 8) {
1646 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1647 for (i = 0; i < 4; i++)
1648 gt->gtier[i] =
1649 intel_uncore_read(uncore, GEN8_GT_IER(i));
1650 gt->ngtier = 4;
1651 } else if (HAS_PCH_SPLIT(i915)) {
1652 gt->ier = intel_uncore_read(uncore, DEIER);
1653 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1654 gt->ngtier = 1;
1655 } else if (GRAPHICS_VER(i915) == 2) {
1656 gt->ier = intel_uncore_read16(uncore, GEN2_IER);
1657 } else if (!IS_VALLEYVIEW(i915)) {
1658 gt->ier = intel_uncore_read(uncore, GEN2_IER);
1659 }
1660 gt->eir = intel_uncore_read(uncore, EIR);
1661 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
1662 }
1663
1664 static void gt_record_info(struct intel_gt_coredump *gt)
1665 {
1666 memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
1667 }
1668
1669 /*
1670 * Generate a semi-unique error code. The code is not meant to have meaning; its
1671 * only purpose is to try to prevent false duplicated bug reports by
1672 * grossly estimating a GPU error state.
1673 *
1674 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
1675 * the hang if we could strip the GTT offset information from it.
1676 *
1677 * It's only a small step better than a random number in its current form.
1678 */
1679 static u32 generate_ecode(const struct intel_engine_coredump *ee)
1680 {
1681 /*
1682 * IPEHR would be an ideal way to detect errors, as it's the gross
1683 * measure of "the command that hung." However, it has some very common
1684 * synchronization commands which almost always appear in cases that are
1685 * strictly a client bug. Use instdone to differentiate some of those.
1686 */
1687 return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
1688 }
1689
1690 static const char *error_msg(struct i915_gpu_coredump *error)
1691 {
1692 struct intel_engine_coredump *first = NULL;
1693 unsigned int hung_classes = 0;
1694 struct intel_gt_coredump *gt;
1695 int len;
1696
1697 for (gt = error->gt; gt; gt = gt->next) {
1698 struct intel_engine_coredump *cs;
1699
1700 for (cs = gt->engine; cs; cs = cs->next) {
1701 if (cs->hung) {
1702 hung_classes |= BIT(cs->engine->uabi_class);
1703 if (!first)
1704 first = cs;
1705 }
1706 }
1707 }
1708
1709 len = scnprintf(error->error_msg, sizeof(error->error_msg),
1710 "GPU HANG: ecode %d:%x:%08x",
1711 GRAPHICS_VER(error->i915), hung_classes,
1712 generate_ecode(first));
1713 if (first && first->context.pid) {
1714 /* Just show the first executing process, more is confusing */
1715 len += scnprintf(error->error_msg + len,
1716 sizeof(error->error_msg) - len,
1717 ", in %s [%d]",
1718 first->context.comm, first->context.pid);
1719 }
1720
1721 return error->error_msg;
1722 }
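
/*
 * The resulting message looks like, for example (values illustrative):
 *
 *	GPU HANG: ecode 12:1:85dffffb, in gnome-shell [1234]
 *
 * i.e. graphics version, a bitmask of the hung engine classes, the ecode
 * from generate_ecode(), and the first guilty process if one was found.
 */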
1723
1724 static void capture_gen(struct i915_gpu_coredump *error)
1725 {
1726 struct drm_i915_private *i915 = error->i915;
1727
1728 error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
1729 error->suspended = i915->runtime_pm.suspended;
1730
1731 error->iommu = -1;
1732 #ifdef CONFIG_INTEL_IOMMU
1733 error->iommu = intel_iommu_gfx_mapped;
1734 #endif
1735 error->reset_count = i915_reset_count(&i915->gpu_error);
1736 error->suspend_count = i915->suspend_count;
1737
1738 i915_params_copy(&error->params, &i915->params);
1739 memcpy(&error->device_info,
1740 INTEL_INFO(i915),
1741 sizeof(error->device_info));
1742 memcpy(&error->runtime_info,
1743 RUNTIME_INFO(i915),
1744 sizeof(error->runtime_info));
1745 error->driver_caps = i915->caps;
1746 }
1747
1748 struct i915_gpu_coredump *
1749 i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
1750 {
1751 struct i915_gpu_coredump *error;
1752
1753 if (!i915->params.error_capture)
1754 return NULL;
1755
1756 error = kzalloc(sizeof(*error), gfp);
1757 if (!error)
1758 return NULL;
1759
1760 kref_init(&error->ref);
1761 error->i915 = i915;
1762
1763 error->time = ktime_get_real();
1764 error->boottime = ktime_get_boottime();
1765 error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
1766 error->capture = jiffies;
1767
1768 capture_gen(error);
1769
1770 return error;
1771 }
1772
1773 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
1774
1775 struct intel_gt_coredump *
1776 intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
1777 {
1778 struct intel_gt_coredump *gc;
1779
1780 gc = kzalloc(sizeof(*gc), gfp);
1781 if (!gc)
1782 return NULL;
1783
1784 gc->_gt = gt;
1785 gc->awake = intel_gt_pm_is_awake(gt);
1786
1787 gt_record_regs(gc);
1788 gt_record_fences(gc);
1789
1790 return gc;
1791 }
1792
1793 struct i915_vma_compress *
1794 i915_vma_capture_prepare(struct intel_gt_coredump *gt)
1795 {
1796 struct i915_vma_compress *compress;
1797
1798 compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
1799 if (!compress)
1800 return NULL;
1801
1802 if (!compress_init(compress)) {
1803 kfree(compress);
1804 return NULL;
1805 }
1806
1807 return compress;
1808 }
1809
1810 void i915_vma_capture_finish(struct intel_gt_coredump *gt,
1811 struct i915_vma_compress *compress)
1812 {
1813 if (!compress)
1814 return;
1815
1816 compress_fini(compress);
1817 kfree(compress);
1818 }
1819
1820 struct i915_gpu_coredump *
1821 i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
1822 {
1823 struct drm_i915_private *i915 = gt->i915;
1824 struct i915_gpu_coredump *error;
1825
1826 /* Check if GPU capture has been disabled */
1827 error = READ_ONCE(i915->gpu_error.first_error);
1828 if (IS_ERR(error))
1829 return error;
1830
1831 error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
1832 if (!error)
1833 return ERR_PTR(-ENOMEM);
1834
1835 error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
1836 if (error->gt) {
1837 struct i915_vma_compress *compress;
1838
1839 compress = i915_vma_capture_prepare(error->gt);
1840 if (!compress) {
1841 kfree(error->gt);
1842 kfree(error);
1843 return ERR_PTR(-ENOMEM);
1844 }
1845
1846 gt_record_info(error->gt);
1847 gt_record_engines(error->gt, engine_mask, compress);
1848
1849 if (INTEL_INFO(i915)->has_gt_uc)
1850 error->gt->uc = gt_record_uc(error->gt, compress);
1851
1852 i915_vma_capture_finish(error->gt, compress);
1853
1854 error->simulated |= error->gt->simulated;
1855 }
1856
1857 error->overlay = intel_overlay_capture_error_state(i915);
1858
1859 return error;
1860 }
1861
1862 void i915_error_state_store(struct i915_gpu_coredump *error)
1863 {
1864 struct drm_i915_private *i915;
1865 static bool warned;
1866
1867 if (IS_ERR_OR_NULL(error))
1868 return;
1869
1870 i915 = error->i915;
1871 drm_info(&i915->drm, "%s\n", error_msg(error));
1872
1873 if (error->simulated ||
1874 cmpxchg(&i915->gpu_error.first_error, NULL, error))
1875 return;
1876
1877 i915_gpu_coredump_get(error);
1878
1879 if (!xchg(&warned, true) &&
1880 ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
1881 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1882 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
1883 pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
1884 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1885 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
1886 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1887 i915->drm.primary->index);
1888 }
1889 }
1890
1891 /**
1892 * i915_capture_error_state - capture an error record for later analysis
1893 * @gt: intel_gt which originated the hang
1894 * @engine_mask: hung engines
1895 *
1896 *
1897 * Should be called when an error is detected (either a hang or an error
1898 * interrupt) to capture error state from the time of the error. Fills
1899 * out a structure which becomes available in debugfs for user level tools
1900 * to pick up.
1901 */
1902 void i915_capture_error_state(struct intel_gt *gt,
1903 intel_engine_mask_t engine_mask)
1904 {
1905 struct i915_gpu_coredump *error;
1906
1907 error = i915_gpu_coredump(gt, engine_mask);
1908 if (IS_ERR(error)) {
1909 cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
1910 return;
1911 }
1912
1913 i915_error_state_store(error);
1914 i915_gpu_coredump_put(error);
1915 }
1916
1917 struct i915_gpu_coredump *
1918 i915_first_error_state(struct drm_i915_private *i915)
1919 {
1920 struct i915_gpu_coredump *error;
1921
1922 spin_lock_irq(&i915->gpu_error.lock);
1923 error = i915->gpu_error.first_error;
1924 if (!IS_ERR_OR_NULL(error))
1925 i915_gpu_coredump_get(error);
1926 spin_unlock_irq(&i915->gpu_error.lock);
1927
1928 return error;
1929 }
1930
1931 void i915_reset_error_state(struct drm_i915_private *i915)
1932 {
1933 struct i915_gpu_coredump *error;
1934
1935 spin_lock_irq(&i915->gpu_error.lock);
1936 error = i915->gpu_error.first_error;
1937 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
1938 i915->gpu_error.first_error = NULL;
1939 spin_unlock_irq(&i915->gpu_error.lock);
1940
1941 if (!IS_ERR_OR_NULL(error))
1942 i915_gpu_coredump_put(error);
1943 }
1944
1945 void i915_disable_error_state(struct drm_i915_private *i915, int err)
1946 {
1947 spin_lock_irq(&i915->gpu_error.lock);
1948 if (!i915->gpu_error.first_error)
1949 i915->gpu_error.first_error = ERR_PTR(err);
1950 spin_unlock_irq(&i915->gpu_error.lock);
1951 }
1952