drivers/gpu/drm/i915/i915_debugfs.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1


#if defined(CONFIG_DEBUG_FS)

#define ACTIVE_LIST	1
#define FLUSHING_LIST	2
#define INACTIVE_LIST	3

static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
	if (obj_priv->user_pin_count > 0)
		return "P";
	else if (obj_priv->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
	switch (obj_priv->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

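/*
 * Dump the objects on one of the GEM lists (active, flushing or inactive);
 * the list to walk is selected via the data field of the debugfs entry.
 */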
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->render_ring.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case FLUSHING_LIST:
		seq_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	list_for_each_entry(obj_priv, head, list) {
		seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
			   &obj_priv->base,
			   get_pin_flag(obj_priv),
			   obj_priv->base.size,
			   obj_priv->base.read_domains,
			   obj_priv->base.write_domain,
			   obj_priv->last_rendering_seqno,
			   obj_priv->dirty ? " dirty" : "",
			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");

		if (obj_priv->base.name)
			seq_printf(m, " (name: %d)", obj_priv->base.name);
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
		if (obj_priv->gtt_space != NULL)
			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);

		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

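/*
 * Report, for each CRTC, whether a page flip is queued or still pending
 * (waiting for vsync), along with the old and new framebuffer offsets.
 */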
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char *pipe = crtc->pipe ? "B" : "A";
		const char *plane = crtc->plane ? "B" : "A";
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %s (plane %s)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
				if (obj_priv)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
				if (obj_priv)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Request:\n");
	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
			    list) {
		seq_printf(m, " %d @ %d\n",
			   gem_request->seqno,
			   (int) (jiffies - gem_request->emitted_jiffies));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (dev_priv->render_ring.status_page.page_addr != NULL) {
		seq_printf(m, "Current sequence: %d\n",
			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
	} else {
		seq_printf(m, "Current sequence: hws uninitialized\n");
	}
	seq_printf(m, "Waiter sequence: %d\n",
		   dev_priv->mm.waiting_gem_seqno);
	seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

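/*
 * Dump the interrupt enable/identity/mask registers.  The register set
 * differs between PCH-split chipsets (separate north/south display and
 * graphics interrupt registers) and earlier generations (IER/IIR/IMR plus
 * the pipe status registers).
 */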
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		seq_printf(m, "Pipe A stat: %08x\n",
			   I915_READ(PIPEASTAT));
		seq_printf(m, "Pipe B stat: %08x\n",
			   I915_READ(PIPEBSTAT));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	if (dev_priv->render_ring.status_page.page_addr != NULL) {
		seq_printf(m, "Current sequence: %d\n",
			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
	} else {
		seq_printf(m, "Current sequence: hws uninitialized\n");
	}
	seq_printf(m, "Waiter sequence: %d\n",
		   dev_priv->mm.waiting_gem_seqno);
	seq_printf(m, "IRQ sequence: %d\n",
		   dev_priv->mm.irq_gem_seqno);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

		if (obj == NULL) {
			seq_printf(m, "Fenced object[%2d] = unused\n", i);
		} else {
			struct drm_i915_gem_object *obj_priv;

			obj_priv = to_intel_bo(obj);
			seq_printf(m, "Fenced object[%2d] = %p: %s "
				   "%08x %08zx %08x %s %08x %08x %d",
				   i, obj, get_pin_flag(obj_priv),
				   obj_priv->gtt_offset,
				   obj->size, obj_priv->stride,
				   get_tiling_flag(obj_priv),
				   obj->read_domains, obj->write_domain,
				   obj_priv->last_rendering_seqno);
			if (obj->name)
				seq_printf(m, " (name: %d)", obj->name);
			seq_printf(m, "\n");
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	volatile u32 *hws;

	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

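/* Hex-dump the backing pages of a GEM object, one 32-bit word per line. */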
static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
{
	int page, i;
	uint32_t *mem;

	for (page = 0; page < page_count; page++) {
		mem = kmap(pages[page]);
		for (i = 0; i < PAGE_SIZE; i += 4)
			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
		kunmap(pages[page]);
	}
}

static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
			    list) {
		obj = &obj_priv->base;
		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
			ret = i915_gem_object_get_pages(obj, 0);
			if (ret) {
				mutex_unlock(&dev->struct_mutex);
				return ret;
			}

			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);

			i915_gem_object_put_pages(obj);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (!dev_priv->render_ring.gem_object) {
		seq_printf(m, "No ringbuffer setup\n");
	} else {
		u8 *virt = dev_priv->render_ring.virtual_start;
		uint32_t off;

		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			seq_printf(m, "%08x : %08x\n", off, *ptr);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned int head, tail;

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;

	seq_printf(m, "RingHead : %08x\n", head);
	seq_printf(m, "RingTail : %08x\n", tail);
	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));

	return 0;
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

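/*
 * Dump the most recently captured GPU error state: timestamp, error
 * registers, the buffers that were active at the time, and copies of the
 * batchbuffer and ringbuffer contents.
 */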
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, page, offset, elt;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (!dev_priv->first_error) {
		seq_printf(m, "no error state collected\n");
		goto out;
	}

	error = dev_priv->first_error;

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
	if (IS_I965G(dev)) {
		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
	}
	seq_printf(m, "seqno: 0x%08x\n", error->seqno);

	if (error->active_bo_count) {
		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);

		for (i = 0; i < error->active_bo_count; i++) {
			seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
				   error->active_bo[i].gtt_offset,
				   error->active_bo[i].size,
				   error->active_bo[i].read_domains,
				   error->active_bo[i].write_domain,
				   error->active_bo[i].seqno,
				   pin_flag(error->active_bo[i].pinned),
				   tiling_flag(error->active_bo[i].tiling),
				   dirty_flag(error->active_bo[i].dirty),
				   purgeable_flag(error->active_bo[i].purgeable));

			if (error->active_bo[i].name)
				seq_printf(m, " (name: %d)", error->active_bo[i].name);
			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);

			seq_printf(m, "\n");
		}
	}

	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
		if (error->batchbuffer[i]) {
			struct drm_i915_error_object *obj = error->batchbuffer[i];

			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->ringbuffer) {
		struct drm_i915_error_object *obj = error->ringbuffer;

		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
		offset = 0;
		for (page = 0; page < obj->page_count; page++) {
			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
				seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
				offset += 4;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay = I915_READ16(CRSTANDVID);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);
	u16 rgvstat = I915_READ16(MEMSTAT_ILK);

	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
		   MEMSTAT_VID_SHIFT);
	seq_printf(m, "Current P-state: %d\n",
		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int i;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int i;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
	u16 crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

	return 0;
}

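/*
 * Report whether framebuffer compression is currently enabled and, if it is
 * disabled, the reason recorded in dev_priv->no_fbc_reason.
 */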
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
		   "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

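/*
 * i915_wedged: reading reports the current wedged (GPU hung) status;
 * writing sets it by hand, and a non-zero value also wakes any waiters and
 * queues the error handling work.
 */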
static int
i915_wedged_open(struct inode *inode,
		 struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof (buf),
		       "wedged : %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof (buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);

	atomic_set(&dev_priv->mm.wedged, val);
	if (val) {
		DRM_WAKEUP(&dev_priv->irq_queue);
		queue_work(dev_priv->wq, &dev_priv->error_work);
	}

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = i915_wedged_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;
	list_add(&node->list, &minor->debugfs_nodes.list);

	return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_wedged",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_wedged_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

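/*
 * Table of the read-only debugfs entries; each one is created under the DRM
 * minor's debugfs directory by i915_debugfs_init() below.
 */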
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
	{"i915_batchbuffers", i915_batchbuffer_info, 0},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

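/*
 * Register the writable i915_wedged file first, then the read-only entries
 * from i915_debugfs_list.
 */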
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_wedged_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */