/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

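/*
 * The error state buffer is filled with formatted text but may be read
 * from an arbitrary file offset (e->start).  Output that falls entirely
 * before that offset is skipped, and the first printf that crosses it is
 * trimmed so that the buffer contents begin exactly at the requested
 * position.  e->err latches the first error encountered so that later
 * calls become no-ops.
 */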
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen, but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek to the first printf that hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		/* Measure on a copy; a va_list must not be walked twice. */
		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek to the first printf that hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

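/*
 * Each buffer is printed as one line of offset, size, domains and seqnos,
 * followed by short textual flags for pinned state, tiling, dirty and
 * purgeable status, the last ring used and the cache level.
 */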
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	err_printf(m, "%s command stream:\n", ring_str(ring));
	err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
	err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
		err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);

	if (INTEL_INFO(dev)->gen >= 4)
		err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   error->semaphore_mboxes[ring][2],
				   error->semaphore_seqno[ring][2]);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
	err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo[0],
				    error->active_bo_count[0]);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo[0],
				    error->pinned_bo_count[0]);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x : %08x\n", offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		obj = error->ring[i].ctx;
		if (obj) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

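/*
 * Allocation falls back progressively: first try a buffer large enough for
 * the requested read (at least one page) opportunistically, then fall back
 * to a single page, and finally to 128 bytes before giving up.
 */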
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

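/*
 * Copy an object's pages into an anonymous snapshot.  Three paths are used
 * depending on where the pages currently live: through the mappable GTT
 * aperture with an atomic WC iomapping, directly from stolen memory, or
 * from the CPU pages after clflushing.  Everything runs with interrupts
 * disabled and GFP_ATOMIC since capture can happen from the error
 * interrupt path.
 */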
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *)offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)

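/* Record the metadata of a single GEM object, not its contents. */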
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma->obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through: gen3 also has the eight gen2 fence registers */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

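/*
 * Locate the batch buffer the ring was most likely executing when the hang
 * was detected: scan the active lists of every address space for an object
 * used by this ring, in the command read domain, whose last read seqno has
 * not yet completed, and snapshot it.  On chipsets with the broken CS TLB
 * workaround the batch is copied through the scratch object instead, when
 * ACTHD points inside it.
 */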
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct i915_address_space *vm;
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->scratch.obj;
		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		list_for_each_entry(vma, &vm->active_list, mm_list) {
			obj = vma->obj;
			if (obj->ring != ring)
				continue;

			if (i915_seqno_passed(seqno, obj->last_read_seqno))
				continue;

			if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
				continue;

			/* We need to copy these to an anonymous buffer as the simplest
			 * method to avoid being overwritten by userspace.
			 */
			return i915_error_object_create(dev_priv, obj);
		}
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (HAS_VEBOX(dev)) {
		error->semaphore_mboxes[ring->id][2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}

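/*
 * For each ring, snapshot the register state, the batch believed to be
 * executing, the ringbuffer contents, the active HW context and the list
 * of outstanding requests.
 */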
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
		cnt = 1;

	vm = &dev_priv->gtt.base;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		i915_gem_capture_vm(dev_priv, error, vm, i++);
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/class/drm/card%d/error\n", dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
		/* fall through: treat unknown gens like gen7 */
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}