/*
 * drivers/gpu/drm/i915/i915_debugfs.c
 * (as of commit "drm/i915: Report the deferred free list in debugfs")
 */
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
f3cd474b 30#include <linux/debugfs.h>
5a0e3ad6 31#include <linux/slab.h>
2017263e
BG
32#include "drmP.h"
33#include "drm.h"
4e5359cd 34#include "intel_drv.h"
2017263e
BG
35#include "i915_drm.h"
36#include "i915_drv.h"
37
38#define DRM_I915_RING_DEBUG 1
39
40
41#if defined(CONFIG_DEBUG_FS)
42
f13d3f73
CW
/* Selectors stored in drm_info_list.data to tell
 * i915_gem_object_list_info() which GEM object list to dump. */
enum {
        RENDER_LIST,
        BSD_LIST,
        FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
        DEFERRED_FREE_LIST,
};
2017263e 51
70d39fe4
CW
/* Map a boolean-ish value to a printable "yes"/"no" string. */
static const char *yesno(int v)
{
        if (v)
                return "yes";
        return "no";
}
56
/*
 * i915_capabilities - dump static device capability flags.
 *
 * Prints the hardware generation, then one "name: yes/no" line per
 * feature bit of struct intel_device_info.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
        /* B() stringifies the field name so each flag prints itself. */
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
        B(is_i915g);
        B(is_i945gm);
        B(is_g33);
        B(need_gfx_hws);
        B(is_g4x);
        B(is_pineview);
        B(is_broadwater);
        B(is_crestline);
        B(is_ironlake);
        B(has_fbc);
        B(has_rc6);
        B(has_pipe_cxsr);
        B(has_hotplug);
        B(cursor_needs_physical);
        B(has_overlay);
        B(overlay_needs_physical);
        B(supports_tv);
#undef B

        return 0;
}
88
a6172a80
CW
89static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
90{
91 if (obj_priv->user_pin_count > 0)
92 return "P";
93 else if (obj_priv->pin_count > 0)
94 return "p";
95 else
96 return " ";
97}
98
99static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
100{
101 switch (obj_priv->tiling_mode) {
102 default:
103 case I915_TILING_NONE: return " ";
104 case I915_TILING_X: return "X";
105 case I915_TILING_Y: return "Y";
106 }
107}
108
37811fcc
CW
/*
 * describe_obj - print a one-line summary of a GEM object.
 *
 * Emits pointer, pin/tiling flags, size, read/write domains, last
 * rendering seqno and dirty/purgeable state, then the optional flink
 * name, fence register and GTT offset.  No trailing newline; callers
 * add their own.
 * NOTE(review): callers appear to hold struct_mutex while the object
 * state is read -- confirm for any new call site.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   obj->base.size,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
}
129
/*
 * i915_gem_object_list_info - dump one of the driver's GEM object lists.
 *
 * The list to walk is selected by the enum value smuggled through
 * node->info_ent->data (see the RENDER_LIST... enum and the
 * i915_debugfs_list table).  Takes struct_mutex interruptibly so a
 * blocked reader can be killed; returns -EINTR-style error from
 * mutex_lock_interruptible(), -EINVAL for an unknown selector.
 */
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        switch (list) {
        case RENDER_LIST:
                seq_printf(m, "Render:\n");
                head = &dev_priv->render_ring.active_list;
                break;
        case BSD_LIST:
                seq_printf(m, "BSD:\n");
                head = &dev_priv->bsd_ring.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case PINNED_LIST:
                seq_printf(m, "Pinned:\n");
                head = &dev_priv->mm.pinned_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        case DEFERRED_FREE_LIST:
                seq_printf(m, "Deferred free:\n");
                head = &dev_priv->mm.deferred_free_list;
                break;
        default:
                /* Unknown selector: drop the lock before bailing out. */
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        list_for_each_entry(obj_priv, head, list) {
                seq_printf(m, " ");
                describe_obj(m, obj_priv);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}
183
4e5359cd
SF
/*
 * i915_gem_pageflip_info - report pending page flips per CRTC.
 *
 * For each CRTC prints whether a flip is queued or waiting for vsync,
 * the stall-check state, and the GTT offsets of the old and new
 * framebuffer objects.  crtc->unpin_work is sampled under event_lock;
 * the objects it points at may still change once the lock is dropped.
 */
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                /* Only two pipes/planes exist on this hardware generation. */
                const char *pipe = crtc->pipe ? "B" : "A";
                const char *plane = crtc->plane ? "B" : "A";
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %s (plane %s)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
                                if(obj_priv)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
                                if(obj_priv)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}
231
2017263e
BG
/*
 * i915_gem_request_info - dump outstanding GPU requests.
 *
 * Prints seqno and age (in jiffies) for each pending request.
 * NOTE(review): only the render ring's request list is walked; the BSD
 * ring's requests are not reported here.
 */
static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Request:\n");
        list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
                            list) {
                seq_printf(m, " %d @ %d\n",
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
255
/*
 * i915_gem_seqno_info - report current/waiter/IRQ sequence numbers.
 *
 * The current seqno lives in the hardware status page, so it is only
 * readable once that page has been set up; otherwise a placeholder
 * line is printed.
 */
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
                           dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence: %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
281
282
/*
 * i915_interrupt_info - dump interrupt enable/identity/mask registers.
 *
 * Pre-PCH (non-split) chipsets use the legacy IER/IIR/IMR block and
 * per-pipe status registers; PCH-split parts (Ironlake+) split the
 * registers into north display, south display and graphics groups.
 * Also repeats the seqno summary printed by i915_gem_seqno_info().
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                seq_printf(m, "Pipe A stat:         %08x\n",
                           I915_READ(PIPEASTAT));
                seq_printf(m, "Pipe B stat:         %08x\n",
                           I915_READ(PIPEBSTAT));
        } else {
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:                 %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
                           dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:     %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
                   dev_priv->mm.irq_gem_seqno);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
341
a6172a80
CW
/*
 * i915_gem_fence_regs_info - dump the fence register allocation.
 *
 * Prints the number of reserved and total fence registers, then one
 * line per register: either "unused" or a summary of the GEM object
 * currently backed by that fence.
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

                if (obj == NULL) {
                        seq_printf(m, "Fenced object[%2d] = unused\n", i);
                } else {
                        struct drm_i915_gem_object *obj_priv;

                        obj_priv = to_intel_bo(obj);
                        seq_printf(m, "Fenced object[%2d] = %p: %s "
                                   "%08x %08zx %08x %s %08x %08x %d",
                                   i, obj, get_pin_flag(obj_priv),
                                   obj_priv->gtt_offset,
                                   obj->size, obj_priv->stride,
                                   get_tiling_flag(obj_priv),
                                   obj->read_domains, obj->write_domain,
                                   obj_priv->last_rendering_seqno);
                        if (obj->name)
                                seq_printf(m, " (name: %d)", obj->name);
                        seq_printf(m, "\n");
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
381
2017263e
BG
/*
 * i915_hws_info - hex dump of the hardware status page.
 *
 * Prints four u32 words per line, prefixed with the byte offset.
 * Returns silently if the status page has not been mapped yet.
 * NOTE(review): the loop bound (4096 / sizeof(u32) / 4 == 256 words,
 * stepped by 4) only covers the first 1KB of the 4KB page -- confirm
 * whether the extra "/ 4" is intentional.
 */
static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        volatile u32 *hws;

        hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}
401
6911a9b8
BG
402static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
403{
404 int page, i;
405 uint32_t *mem;
406
407 for (page = 0; page < page_count; page++) {
de227ef0 408 mem = kmap(pages[page]);
6911a9b8
BG
409 for (i = 0; i < PAGE_SIZE; i += 4)
410 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
de227ef0 411 kunmap(pages[page]);
6911a9b8
BG
412 }
413}
414
/*
 * i915_batchbuffer_info - dump the contents of active batch buffers.
 *
 * Walks the render ring's active list and hex-dumps every object whose
 * read domains include COMMAND (i.e. batch buffers).  Pages are pinned
 * via i915_gem_object_get_pages() around the dump; on failure the lock
 * is dropped and the error propagated.
 */
static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
                            list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                        ret = i915_gem_object_get_pages(obj, 0);
                        if (ret) {
                                mutex_unlock(&dev->struct_mutex);
                                return ret;
                        }

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
                        i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);

                        i915_gem_object_put_pages(obj);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
449
/*
 * i915_ringbuffer_data - hex dump the render ring buffer contents.
 *
 * Dumps the whole ring one u32 per line; prints a notice instead if
 * the ring has not been set up yet.
 */
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                u8 *virt = dev_priv->render_ring.virtual_start;
                uint32_t off;

                for (off = 0; off < dev_priv->render_ring.size; off += 4) {
                        uint32_t *ptr = (uint32_t *)(virt + off);
                        seq_printf(m, "%08x :  %08x\n", off, *ptr);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
476
/*
 * i915_ringbuffer_info - report render ring head/tail/size/ACTHD.
 *
 * Reads the ring registers directly (no struct_mutex needed for MMIO
 * reads).  Gen4+ parts use the ACTHD_I965 register address.
 */
static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int head, tail;

        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;

        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
        seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
        seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));

        return 0;
}
494
9df30794
CW
/* Error-state pin flag: " P" user-pinned, " p" kernel-pinned, "" unpinned. */
static const char *pin_flag(int pinned)
{
        if (pinned == 0)
                return "";
        return (pinned > 0) ? " P" : " p";
}
504
505static const char *tiling_flag(int tiling)
506{
507 switch (tiling) {
508 default:
509 case I915_TILING_NONE: return "";
510 case I915_TILING_X: return " X";
511 case I915_TILING_Y: return " Y";
512 }
513}
514
/* Error-state dirty flag. */
static const char *dirty_flag(int dirty)
{
        if (dirty)
                return " dirty";
        return "";
}
519
/* Error-state purgeable flag. */
static const char *purgeable_flag(int purgeable)
{
        if (purgeable)
                return " purgeable";
        return "";
}
524
63eeaf38
JB
/*
 * i915_error_state - dump the most recently captured GPU error state.
 *
 * Prints the error registers, the list of buffers that were active at
 * hang time, hex dumps of the captured batch buffers and ring buffer,
 * and any overlay error state.  The whole dump runs under error_lock
 * so the capture cannot be replaced mid-read.
 */
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
        /* INSTPS/INSTDONE1 only exist on gen4 and later. */
        if (INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
        }
        seq_printf(m, "seqno: 0x%08x\n", error->seqno);

        if (error->active_bo_count) {
                seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);

                for (i = 0; i < error->active_bo_count; i++) {
                        seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
                                   error->active_bo[i].gtt_offset,
                                   error->active_bo[i].size,
                                   error->active_bo[i].read_domains,
                                   error->active_bo[i].write_domain,
                                   error->active_bo[i].seqno,
                                   pin_flag(error->active_bo[i].pinned),
                                   tiling_flag(error->active_bo[i].tiling),
                                   dirty_flag(error->active_bo[i].dirty),
                                   purgeable_flag(error->active_bo[i].purgeable));

                        if (error->active_bo[i].name)
                                seq_printf(m, " (name: %d)", error->active_bo[i].name);
                        if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
                                seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);

                        seq_printf(m, "\n");
                }
        }

        /* Captured batch buffers: one hex dump per buffer. */
        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        /* Captured ring buffer contents at hang time. */
        if (error->ringbuffer) {
                struct drm_i915_error_object *obj = error->ringbuffer;

                seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
                offset = 0;
                for (page = 0; page < obj->page_count; page++) {
                        for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                offset += 4;
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}
6911a9b8 618
f97108d1
JB
/*
 * i915_rstdby_delays - report render standby delays from CRSTANDVID.
 *
 * High byte holds the with-context delay, low byte the without-context
 * delay; only the low 6 bits of each are meaningful.
 */
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay = I915_READ16(CRSTANDVID);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}
630
/*
 * i915_cur_delayinfo - report requested vs current P-state and VID.
 *
 * Requested values come from MEMSWCTL, current values from the
 * Ironlake MEMSTAT register.
 */
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);
        u16 rgvstat = I915_READ16(MEMSTAT_ILK);

        seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
        seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
        seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                   MEMSTAT_VID_SHIFT);
        seq_printf(m, "Current P-state: %d\n",
                   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

        return 0;
}
648
/*
 * i915_delayfreq_table - dump the 16-entry P-state VID/frequency table.
 *
 * Reads PXVFREQ_BASE + 4*i for each of the 16 P-states and decodes the
 * PX field of each entry.
 */
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int i;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        return 0;
}
665
/* Convert an INTTOEXT map index to millivolts: 1250mV minus 25mV per step. */
static inline int MAP_TO_MV(int map)
{
        return 1250 - 25 * map;
}
670
/*
 * i915_inttoext_table - dump the 32 internal-to-external voltage map
 * registers (INTTOEXT_BASE_ILK + 4*i, i = 1..32).
 */
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int i;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        return 0;
}
686
/*
 * i915_drpc_info - report dynamic render power control configuration.
 *
 * Decodes MEMMODECTL (boost, HW/SW control, clock gating, P-state
 * range), CRSTANDVID (RS1/RS2 VIDs) and the render-standby control
 * register.
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
        u16 crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        /* RCX_SW_EXIT set means software has forced the GPU out of standby. */
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

        return 0;
}
719
b5e50c3f
JB
/*
 * i915_fbc_status - report framebuffer compression state.
 *
 * Prints whether FBC is supported/enabled; when disabled, decodes
 * dev_priv->no_fbc_reason into a human-readable explanation.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_NO_OUTPUT:
                        seq_printf(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}
764
4a9bef37
JB
/*
 * i915_sr_status - report whether panel self-refresh is enabled.
 *
 * The self-refresh enable bit lives in a different register on each
 * hardware family; platforms not covered below report "disabled".
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (IS_IRONLAKE(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n",
                   sr_enabled ? "enabled" : "disabled");

        return 0;
}
786
7648fa99
JB
/*
 * i915_emon_status - report EMON energy/temperature readings.
 *
 * Samples GMCH temperature, chipset power and graphics power under
 * struct_mutex (the i915_*_val() helpers read shared sampling state),
 * then prints each value plus the chipset+graphics total.
 */
static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}
811
/*
 * i915_gfxec - dump the graphics energy counter.
 * NOTE(review): 0x112f4 is a raw register offset with no named #define
 * in this file -- presumably the GFXEC register; worth naming.
 */
static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        return 0;
}
822
44834a67
CW
/*
 * i915_opregion - dump the raw ACPI OpRegion contents, if mapped.
 *
 * Writes OPREGION_SIZE bytes straight into the seq_file; prints
 * nothing when no OpRegion header was found.
 */
static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
842
37811fcc
CW
/*
 * i915_gem_framebuffer_info - describe the fbcon and user framebuffers.
 *
 * Prints size/depth/bpp plus the backing GEM object for the fbdev
 * framebuffer first, then for every other framebuffer on the device.
 * Holds mode_config.mutex (not struct_mutex) while walking fb_list.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_fbdev *ifbdev;
        struct intel_framebuffer *fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);

        seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
        describe_obj(m, to_intel_bo(fb->obj));
        seq_printf(m, "\n");

        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
                /* The fbdev framebuffer was already printed above. */
                if (&fb->base == ifbdev->helper.fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
                describe_obj(m, to_intel_bo(fb->obj));
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}
884
f3cd474b
CW
/* Stash the drm_device pointer (stored in i_private at create time)
 * so the read/write handlers can reach it via filp->private_data. */
static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}
892
893static ssize_t
894i915_wedged_read(struct file *filp,
895 char __user *ubuf,
896 size_t max,
897 loff_t *ppos)
898{
899 struct drm_device *dev = filp->private_data;
900 drm_i915_private_t *dev_priv = dev->dev_private;
901 char buf[80];
902 int len;
903
904 len = snprintf(buf, sizeof (buf),
905 "wedged : %d\n",
906 atomic_read(&dev_priv->mm.wedged));
907
f4433a8d
DC
908 if (len > sizeof (buf))
909 len = sizeof (buf);
910
f3cd474b
CW
911 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
912}
913
/*
 * i915_wedged_write - manually set/clear the GPU wedged state.
 *
 * Parses an integer from the user buffer (an empty write defaults to
 * 1).  When setting wedged, wakes all waiters and kicks the error work
 * so hang recovery runs.
 */
static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                /* Reserve one byte for the NUL terminator. */
                if (cnt > sizeof (buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
                wake_up_all(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

        return cnt;
}
946
/* File operations for the i915_wedged debugfs node. */
static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
};
953
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release.
 *
 * Registers @ent on the minor's debugfs node list keyed by @key so
 * drm_debugfs_remove_files() can find and remove it at cleanup time.
 * On allocation failure the dentry is removed before returning -ENOMEM,
 * so ownership of @ent always passes to this function. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;
        list_add(&node->list, &minor->debugfs_nodes.list);

        return 0;
}
976
/*
 * i915_wedged_create - create the writable i915_wedged debugfs file.
 *
 * The drm_device is stashed as the file's private data so the fops can
 * retrieve it (see i915_wedged_open()).
 * NOTE(review): only IS_ERR() is checked; debugfs_create_file() can
 * also return NULL on some failures -- confirm against the kernel
 * version in use.
 */
static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}
9e3a6d15 991
27c202ad 992static struct drm_info_list i915_debugfs_list[] = {
70d39fe4 993 {"i915_capabilities", i915_capabilities, 0, 0},
82690bba
CW
994 {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
995 {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
433e12f7
BG
996 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
997 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
f13d3f73 998 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
d21d5975 999 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
4e5359cd 1000 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2017263e
BG
1001 {"i915_gem_request", i915_gem_request_info, 0},
1002 {"i915_gem_seqno", i915_gem_seqno_info, 0},
a6172a80 1003 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2017263e
BG
1004 {"i915_gem_interrupt", i915_interrupt_info, 0},
1005 {"i915_gem_hws", i915_hws_info, 0},
6911a9b8
BG
1006 {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
1007 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
1008 {"i915_batchbuffers", i915_batchbuffer_info, 0},
63eeaf38 1009 {"i915_error_state", i915_error_state, 0},
f97108d1
JB
1010 {"i915_rstdby_delays", i915_rstdby_delays, 0},
1011 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
1012 {"i915_delayfreq_table", i915_delayfreq_table, 0},
1013 {"i915_inttoext_table", i915_inttoext_table, 0},
1014 {"i915_drpc_info", i915_drpc_info, 0},
7648fa99
JB
1015 {"i915_emon_status", i915_emon_status, 0},
1016 {"i915_gfxec", i915_gfxec, 0},
b5e50c3f 1017 {"i915_fbc_status", i915_fbc_status, 0},
4a9bef37 1018 {"i915_sr_status", i915_sr_status, 0},
44834a67 1019 {"i915_opregion", i915_opregion, 0},
37811fcc 1020 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2017263e 1021};
27c202ad 1022#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 1023
27c202ad 1024int i915_debugfs_init(struct drm_minor *minor)
2017263e 1025{
f3cd474b
CW
1026 int ret;
1027
1028 ret = i915_wedged_create(minor->debugfs_root, minor);
1029 if (ret)
1030 return ret;
1031
27c202ad
BG
1032 return drm_debugfs_create_files(i915_debugfs_list,
1033 I915_DEBUGFS_ENTRIES,
2017263e
BG
1034 minor->debugfs_root, minor);
1035}
1036
27c202ad 1037void i915_debugfs_cleanup(struct drm_minor *minor)
2017263e 1038{
27c202ad
BG
1039 drm_debugfs_remove_files(i915_debugfs_list,
1040 I915_DEBUGFS_ENTRIES, minor);
33db679b
KH
1041 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1042 1, minor);
2017263e
BG
1043}
1044
1045#endif /* CONFIG_DEBUG_FS */
This page took 0.336342 seconds and 5 git commands to generate.