/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

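/*
 * The helpers below implement a windowed writer for the error state
 * buffer: debugfs reads the dump in chunks, so e->start is the file
 * offset where the current read window begins, e->pos tracks how much
 * formatted output has been generated so far (including the part that
 * was skipped over), and e->bytes counts what actually landed in e->buf.
 */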
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

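/*
 * Note the two-pass formatting below: while still seeking towards the
 * window start, a counting vsnprintf(NULL, 0, ...) on a va_copy sizes
 * the output without emitting it; only output that falls inside the
 * window is rendered into e->buf for real.
 */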
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first write which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "  %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE: return "idle";
	case HANGCHECK_WAIT: return "wait";
	case HANGCHECK_ACTIVE: return "active";
	case HANGCHECK_ACTIVE_LOOP: return "active (loop)";
	case HANGCHECK_KICK: return "kick";
	case HANGCHECK_HUNG: return "hung";
	}

	return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, "  HEAD: 0x%08x\n", ring->head);
	err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
	err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
	err_printf(m, "  HWS: 0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x :  %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);
	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

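/*
 * Snapshot the contents of a GEM object into kernel memory. When the
 * object is uncached and mapped in the mappable aperture, the pages are
 * read back through an atomic WC io-mapping; otherwise each page is
 * clflushed and copied via kmap_atomic(). This runs in atomic context,
 * hence the GFP_ATOMIC allocations and local_irq_save() around the
 * copies.
 */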
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

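/*
 * Unlike the active capture above, which walks a single VM's active
 * list, pinned buffers are found by filtering the global bound list
 * for VMAs that belong to the given VM and have a non-zero pin count.
 */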
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0) {
				capture_bo(err++, vma);
				break;
			}
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it also catches very
	 * common synchronization commands which almost always appear when
	 * the hang is strictly a client bug. Use instdone to differentiate
	 * those cases somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 9:
	case 8:
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

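/*
 * On gen8 the semaphore mailbox values live in a GGTT object rather
 * than in per-ring MMIO registers, so they are read back from a
 * snapshot of the semaphore page; the gen6 variant below reads the
 * RING_SYNC_* registers directly.
 */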
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_object_create(dev_priv,
						 dev_priv->semaphore_obj,
						 &dev_priv->gtt.base);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		switch (INTEL_INFO(dev)->gen) {
		case 9:
		case 8:
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
			break;
		case 7:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
			break;
		case 6:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
			break;
		}
	}
}

static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently the render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

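/*
 * Per-ring capture: for each ring this records the ring registers, a
 * copy of the active batchbuffer (and the workaround batch on broken
 * CS-TLB platforms), the submitting process, the ringbuffer and HW
 * status page, the active HW context, and the list of outstanding
 * requests.
 */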
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->file_priv) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->file_priv->file->pid,
						PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = ring->default_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0) {
				i++;
				break;
			}
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}

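/*
 * Build the one-line "GPU HANG: ecode gen:ring:0x..." summary that is
 * stored in the error state and also printed to the kernel log by
 * i915_capture_error_state().
 */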
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
	case 8:
	case 9:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}