/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_ring *ring)
{
	if (!ring->valid)
		return;

	err_printf(m, " HEAD: 0x%08x\n", ring->head);
	err_printf(m, " TAIL: 0x%08x\n", ring->tail);
	err_printf(m, " CTL: 0x%08x\n", ring->ctl);
	err_printf(m, " HWS: 0x%08x\n", ring->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ring->seqno);
	err_printf(m, " waiting: %s\n", yesno(ring->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
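
/*
 * i915_error_printf() is the exported entry point behind the err_printf()
 * macro; it is also used by the overlay and display error-state printers.
 */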
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		err_printf(m, "%s command stream:\n", ring_str(i));
		i915_ring_error_state(m, dev, &error->ring[i]);
	}

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo[0],
				    error->active_bo_count[0]);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo[0],
				    error->pinned_bo_count[0]);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
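
/*
 * Object contents are captured with GFP_ATOMIC and with interrupts
 * disabled, since this runs from the error/hangcheck path; allocation
 * failures simply truncate the capture. Depending on where the object
 * currently lives, each page is copied either through the WC GTT
 * mapping, from stolen memory, or via a clflushed CPU kmap.
 */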
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       struct i915_address_space *vm,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (src->cache_level == I915_CACHE_NONE &&
		    reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping &&
		    i915_is_ggtt(vm)) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *)offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
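
/*
 * i915_error_object_create() records an object at its offset in the given
 * address space, while i915_error_ggtt_object_create() always uses the
 * global GTT view; both capture the object's full size in pages.
 */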
#define i915_error_object_create(dev_priv, src, vm) \
	i915_error_object_create_sized((dev_priv), (src), (vm), \
				       (src)->base.size>>PAGE_SHIFT)

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
				       (src)->base.size>>PAGE_SHIFT)
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	/* Tri-state: 0 = unpinned, 1 = pinned by the kernel (" P"),
	 * -1 = also pinned by userspace (" p"); cf. pin_flag().
	 */
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma->obj);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (!i915_gem_obj_is_pinned(obj))
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands which almost always appear when the
	 * hang is strictly a client bug. Use instdone to differentiate those
	 * somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
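
/*
 * Fence registers moved and grew over the generations: gen6+ has
 * num_fence_regs 64-bit registers, gen4/5 have 16 64-bit registers, and
 * gen2/3 use 32-bit registers (with 945/G33 adding eight extra); hence
 * the per-generation switch below, including the deliberate fall-through
 * from gen 3 to gen 2.
 */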
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		ering->semaphore_mboxes[0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		ering->semaphore_mboxes[1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
		ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
	}

	if (HAS_VEBOX(dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->cpu_ring_head = ring->buffer->head;
	ering->cpu_ring_tail = ring->buffer->tail;

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		switch (INTEL_INFO(dev)->gen) {
		case 8:
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
			break;
		case 7:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
			break;
		case 6:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
			break;
		}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
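
/*
 * Per-ring capture: the active batch is recorded in the address space it
 * was executing from (the request's context VM, when there is one), while
 * the ringbuffer and status page always live in the global GTT.
 */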
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, ring, &error->ring[i]);

		error->ring[i].pid = -1;
		request = i915_gem_find_active_request(ring);
		if (request) {
			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 request->ctx ?
							 request->ctx->vm :
							 &dev_priv->gtt.base);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
			    ring->scratch.obj)
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
								      ring->scratch.obj);

			if (request->file_priv) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->file_priv->file->pid,
						PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);

		if (ring->status_page.obj)
			error->ring[i].hws_page =
				i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			i++;
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		i915_gem_capture_vm(dev_priv, error, vm, i++);
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else {
		if (IS_GEN2(dev))
			error->ier = I915_READ16(IER);
		else
			error->ier = I915_READ(IER);
	}

	/* 4: Everything else */
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:0x%08x", ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
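
/*
 * Only the first error state is retained (in gpu_error.first_error) until
 * userspace clears it via i915_destroy_error_state(); readers take a kref
 * so the state can be dumped safely while subsequent hangs are discarded.
 */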
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
	case 8:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}