/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */
#include <generated/utsrelease.h>

#include "i915_drv.h"
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
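
/*
 * The helpers below implement a small windowed output buffer.  An error
 * state dump can be megabytes long, but debugfs reads arrive as
 * (offset, count) windows; rather than format the entire dump for every
 * read, the buffer tracks a virtual position (e->pos) and only
 * materializes bytes that fall inside the window beginning at e->start.
 */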
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
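
/*
 * Worked example (illustrative values): with e->start = 4096 and an empty
 * buffer, a 100-byte printf landing at e->pos = 4050 overlaps the window
 * start.  __i915_error_advance() computes off = 46, memmoves the trailing
 * 54 bytes to the head of e->buf and sets e->bytes = 54, so the read
 * window begins exactly at file offset 4096.
 */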
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek to the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek to the first string which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
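
/*
 * All of the dump code below goes through err_printf()/err_puts(); both
 * funnel into the seek/advance machinery above, so every emitted line is
 * windowed the same way regardless of how it was produced.
 */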
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "  %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
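
/*
 * For reference, one dumped buffer line looks roughly like this
 * (illustrative values, not captured output):
 *
 *   00000000_00a00000     4096 03 02 [ 01 00 00 00 00 ] 01 X dirty render LLC (name: 5)
 *
 * i.e. GTT offset, size, read domains, write domain, per-engine read
 * seqnos, write seqno, then the flag strings built by the helpers above.
 */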
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, "  START: 0x%08x\n", ring->start);
	err_printf(m, "  HEAD:  0x%08x\n", ring->head);
	err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
	err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
	err_printf(m, "  HWS:   0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
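
/*
 * print_error_obj() emits one "offset : dword" pair per line for every
 * captured page, so tools such as intel_error_decode from intel-gpu-tools
 * can parse the hex dump back into a binary batch buffer.
 */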
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   dev->pdev->subsystem_vendor,
		   dev->pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->engine[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		obj = error->ring[i].wa_ctx;
		if (obj) {
			u64 wa_ctx_offset = obj->gtt_offset;
			u32 *wa_ctx_page = &obj->pages[0][0];
			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
					   engine->wa_ctx.per_ctx.size);

			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
				   dev_priv->engine[i].name, wa_ctx_offset);
			offset = 0;
			for (elt = 0; elt < wa_ctx_size; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   wa_ctx_page[elt + 0],
					   wa_ctx_page[elt + 1],
					   wa_ctx_page[elt + 2],
					   wa_ctx_page[elt + 3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
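
/*
 * Note the fallback ladder above: first try a buffer large enough for the
 * whole requested window (without retries or allocation warnings, since
 * this is a best-effort path), then fall back to PAGE_SIZE, and finally
 * to a tiny buffer before giving up with -ENOMEM.
 */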
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
		i915_error_object_free(error->ring[i].wa_ctx);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		   vma && (vma->bound & GLOBAL_BIND) &&
		   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
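
/*
 * The copy strategy above is deliberately conservative: uncached pages
 * that are bound into the mappable aperture are read back through the GTT
 * with memcpy_fromio(), and everything else is copied via kmap_atomic()
 * with clflushes around the access.  It all runs under local_irq_save()
 * and GFP_ATOMIC because error capture may be invoked from atomic
 * (interrupt) context.
 */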
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_engine(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}
/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands which almost always appear when the
	 * hang is strictly a client bug. Use instdone to differentiate those
	 * cases somewhat.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
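
/*
 * Worked example (illustrative numbers): a hang on the render ring with
 * IPEHR 0x01000000 and INSTDONE 0xfffffffe yields an ecode of
 * 0x01000000 ^ 0xfffffffe = 0xfefffffe.  The value only needs to be
 * stable enough to de-duplicate bug reports, as noted above.
 */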
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (IS_GEN3(dev) || IS_GEN2(dev)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(engine, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *engine,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, engine,
						    ering);
		else
			gen6_record_semaphore_state(dev_priv, engine, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(GEN2_INSTDONE);
	}

	ering->waiting = waitqueue_active(&engine->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ering->seqno = engine->get_seqno(engine, false);
	ering->acthd = intel_ring_get_active_head(engine);
	ering->start = I915_READ_START(engine);
	ering->head = I915_READ_HEAD(engine);
	ering->tail = I915_READ_TAIL(engine);
	ering->ctl = I915_READ_CTL(engine);

	if (I915_NEED_GFX_HWS(dev)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->dev)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = engine->hangcheck.score;
	ering->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *engine,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (engine->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (engine->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, engine, &error->ring[i]);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->ggtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							engine->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[engine->id].ringbuf;
			else
				rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
		} else
			rbuf = engine->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv,
						      engine->status_page.obj);

		if (engine->wa_ctx.obj) {
			error->ring[i].wa_ctx =
				i915_error_ggtt_object_create(dev_priv,
							      engine->wa_ctx.obj);
		}

		i915_gem_record_active_context(engine, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &engine->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &engine->request_list, list) {
			struct drm_i915_error_request *erq;

			if (count >= error->ring[i].num_requests) {
				/*
				 * If the ring request list was changed in
				 * between the point where the error request
				 * list was created and dimensioned and this
				 * point then just exit early to avoid crashes.
				 *
				 * We don't need to communicate that the
				 * request list changed state during error
				 * state capture and that the error state is
				 * slightly incorrect as a consequence since we
				 * are typically only interested in the request
				 * list state at the point of error state
				 * capture, not in any changes happening during
				 * the capture.
				 */
				break;
			}

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}
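
/*
 * The four bookkeeping arrays above are allocated all-or-nothing: if any
 * kcalloc() fails they are all freed and the pointers cleared, and
 * error->vm_count stays zero, so the per-VM dump loop in
 * i915_error_state_to_str() is simply skipped.
 */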
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}
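
/*
 * The resulting message looks like, e.g. (illustrative values):
 *
 *   GPU HANG: ecode 7:0:0xfefffffe, in gnome-shell [1178], reason: Ring hung, action: reset
 *
 * i.e. gen, hanging ring id, ecode, the submitting process when known,
 * then the caller-supplied reason and whether a reset follows.
 */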
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
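
/*
 * i915_capture_error_state() is typically invoked from the hang-check and
 * error-interrupt paths (e.g. via i915_handle_error()).  Only the first
 * error state is kept until userspace clears it, which is why the capture
 * above drops the new state when gpu_error.first_error is already set.
 */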
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev) || IS_GEN3(dev))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}