/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */
#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	default: return "";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		WARN(1, "overflow");
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that the
	 * start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
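
/*
 * Example usage (as seen throughout this file): the dump routines simply
 * stream text into the window and the helpers above take care of seeking
 * and truncation, e.g.
 *
 *	err_printf(m, "EIR: 0x%08x\n", error->eir);
 *	err_puts(m, ring_str(i));
 */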
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_ring *ring)
{
	if (!ring->valid)
		return;

	err_printf(m, "  HEAD: 0x%08x\n", ring->head);
	err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
	err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
	err_printf(m, "  HWS: 0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		err_printf(m, "%s command stream:\n", ring_str(i));
		i915_ring_error_state(m, dev, &error->ring[i]);
	}

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo[0],
				    error->active_bo_count[0]);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo[0],
				    error->pinned_bo_count[0]);
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}
	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       struct i915_address_space *vm,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (src->cache_level == I915_CACHE_NONE &&
		    reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping &&
		    i915_is_ggtt(vm)) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
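
/*
 * Convenience wrappers around i915_error_object_create_sized(): both size
 * the snapshot from the source object's backing store (base.size in pages);
 * the ggtt variant always copies through the global GTT address space.
 */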
#define i915_error_object_create(dev_priv, src, vm) \
	i915_error_object_create_sized((dev_priv), (src), (vm), \
				       (src)->base.size>>PAGE_SHIFT)

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
				       (src)->base.size>>PAGE_SHIFT)
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
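
/*
 * capture_active_bo() and capture_pinned_bo() walk the given list and record
 * up to 'count' buffer objects via capture_bo(), returning the number of
 * entries actually captured.
 */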
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma->obj);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (!i915_gem_obj_is_pinned(obj))
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands which almost always appear even when
	 * the hang is strictly a client bug. Use instdone to differentiate
	 * those cases somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->cpu_ring_head = ring->buffer->head;
	ering->cpu_ring_tail = ring->buffer->tail;

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		switch (INTEL_INFO(dev)->gen) {
		case 8:
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
			break;
		case 7:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
			break;
		case 6:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
			break;
		}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 request->ctx ?
							 request->ctx->vm :
							 &dev_priv->gtt.base);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
			    ring->scratch.obj)
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->file_priv) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->file_priv->file->pid,
						PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);

		if (ring->status_page.obj)
			error->ring[i].hws_page =
				i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			i++;
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		i915_gem_capture_vm(dev_priv, error, vm, i++);
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:0x%08x", ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
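
/*
 * The accessors below hand out and retire references to the single capture
 * stored in gpu_error.first_error; access is serialised by gpu_error.lock
 * and object lifetime is managed through the embedded kref.
 */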
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
	case 8:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}