drm/i915: properly guard ilk ips state
[deliverable/linux.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
f3cd474b 30#include <linux/debugfs.h>
5a0e3ad6 31#include <linux/slab.h>
2d1a8a48 32#include <linux/export.h>
2017263e
BG
33#include "drmP.h"
34#include "drm.h"
4e5359cd 35#include "intel_drv.h"
e5c65260 36#include "intel_ringbuffer.h"
2017263e
BG
37#include "i915_drm.h"
38#include "i915_drv.h"
39
40#define DRM_I915_RING_DEBUG 1
41
42
43#if defined(CONFIG_DEBUG_FS)
44
/*
 * Selector values passed via drm_info_list.data to pick which GEM object
 * list a debugfs dumper should walk.
 */
enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
2017263e 50
70d39fe4
CW
/* Render a boolean-ish int as the literal string "yes" or "no". */
static const char *yesno(int v)
{
	if (v)
		return "yes";

	return "no";
}
55
/*
 * debugfs: dump the device generation, PCH type and every feature flag
 * from the intel_device_info capability struct, one "flag: yes/no" line each.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
	/* Expand the DEV_INFO_FLAGS x-macro: one seq_printf per capability bit. */
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
	DEV_INFO_FLAGS;
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP

	return 0;
}
2017263e 72
05394f39 73static const char *get_pin_flag(struct drm_i915_gem_object *obj)
a6172a80 74{
05394f39 75 if (obj->user_pin_count > 0)
a6172a80 76 return "P";
05394f39 77 else if (obj->pin_count > 0)
a6172a80
CW
78 return "p";
79 else
80 return " ";
81}
82
05394f39 83static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 84{
0206e353
AJ
85 switch (obj->tiling_mode) {
86 default:
87 case I915_TILING_NONE: return " ";
88 case I915_TILING_X: return "X";
89 case I915_TILING_Y: return "Y";
90 }
a6172a80
CW
91}
92
93dfb40c 93static const char *cache_level_str(int type)
08c18323
CW
94{
95 switch (type) {
93dfb40c
CW
96 case I915_CACHE_NONE: return " uncached";
97 case I915_CACHE_LLC: return " snooped (LLC)";
98 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
08c18323
CW
99 default: return "";
100 }
101}
102
/*
 * Print a one-line summary of a GEM object: address, pin/tiling flags,
 * size, domains, last seqnos, cache level, plus optional name/fence/GTT
 * placement, mappable flags and the ring it was last active on.
 * Caller holds struct_mutex; no trailing newline is emitted.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		/* Build a tiny "p"/"f"/"pf" string describing why it must be mappable. */
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
138
/*
 * debugfs: dump every object on one GEM LRU list (active or inactive,
 * selected via info_ent->data) and print aggregate object/GTT size totals.
 * Returns -EINVAL for an unknown list selector, or the error from an
 * interrupted mutex acquisition.
 */
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	/* struct_mutex guards the mm lists and the per-object state we print. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
183
/*
 * Accumulate GTT statistics over every object on @list (linked via @member).
 * NOTE: this macro deliberately reaches into the caller's scope — it
 * requires 'obj', 'size', 'count', 'mappable_size' and 'mappable_count'
 * to be declared by the caller, and updates them in place.
 */
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)
6299f992 194
/*
 * debugfs: print aggregate GEM memory statistics — totals for all
 * objects, objects bound in the GTT, the active and inactive lists,
 * and the pinned-/fault-mappable subsets, plus the overall GTT size.
 */
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	/* count_objects() updates size/count/mappable_* declared above. */
	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	/* Separate pass: split the GTT list by fault- vs pin-mappable. */
	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%zu] gtt total\n",
		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
251
/*
 * debugfs: dump every object bound in the GTT (optionally only pinned
 * ones, when info_ent->data selects PINNED_LIST) with size totals.
 */
static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		/* PINNED_LIST mode skips anything without a pin reference. */
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
286
/*
 * debugfs: report the pending page-flip state of every CRTC — whether a
 * flip is queued or waiting for vsync, the stall-check status, and the
 * GTT offsets of the old and new framebuffers.
 */
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		/* event_lock guards crtc->unpin_work against the flip IRQ path. */
		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
334
2017263e
BG
335static int i915_gem_request_info(struct seq_file *m, void *data)
336{
337 struct drm_info_node *node = (struct drm_info_node *) m->private;
338 struct drm_device *dev = node->minor->dev;
339 drm_i915_private_t *dev_priv = dev->dev_private;
340 struct drm_i915_gem_request *gem_request;
c2c347a9 341 int ret, count;
de227ef0
CW
342
343 ret = mutex_lock_interruptible(&dev->struct_mutex);
344 if (ret)
345 return ret;
2017263e 346
c2c347a9 347 count = 0;
1ec14ad3 348 if (!list_empty(&dev_priv->ring[RCS].request_list)) {
c2c347a9
CW
349 seq_printf(m, "Render requests:\n");
350 list_for_each_entry(gem_request,
1ec14ad3 351 &dev_priv->ring[RCS].request_list,
c2c347a9
CW
352 list) {
353 seq_printf(m, " %d @ %d\n",
354 gem_request->seqno,
355 (int) (jiffies - gem_request->emitted_jiffies));
356 }
357 count++;
358 }
1ec14ad3 359 if (!list_empty(&dev_priv->ring[VCS].request_list)) {
c2c347a9
CW
360 seq_printf(m, "BSD requests:\n");
361 list_for_each_entry(gem_request,
1ec14ad3 362 &dev_priv->ring[VCS].request_list,
c2c347a9
CW
363 list) {
364 seq_printf(m, " %d @ %d\n",
365 gem_request->seqno,
366 (int) (jiffies - gem_request->emitted_jiffies));
367 }
368 count++;
369 }
1ec14ad3 370 if (!list_empty(&dev_priv->ring[BCS].request_list)) {
c2c347a9
CW
371 seq_printf(m, "BLT requests:\n");
372 list_for_each_entry(gem_request,
1ec14ad3 373 &dev_priv->ring[BCS].request_list,
c2c347a9
CW
374 list) {
375 seq_printf(m, " %d @ %d\n",
376 gem_request->seqno,
377 (int) (jiffies - gem_request->emitted_jiffies));
378 }
379 count++;
2017263e 380 }
de227ef0
CW
381 mutex_unlock(&dev->struct_mutex);
382
c2c347a9
CW
383 if (count == 0)
384 seq_printf(m, "No requests\n");
385
2017263e
BG
386 return 0;
387}
388
b2223497
CW
389static void i915_ring_seqno_info(struct seq_file *m,
390 struct intel_ring_buffer *ring)
391{
392 if (ring->get_seqno) {
393 seq_printf(m, "Current sequence (%s): %d\n",
394 ring->name, ring->get_seqno(ring));
b2223497
CW
395 }
396}
397
/*
 * debugfs: print the current hardware seqno of every ring.
 */
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
416
417
/*
 * debugfs: dump the interrupt enable/identity/mask registers.  The
 * register layout differs per platform, so three variants are handled:
 * Valleyview (VLV_* display regs plus GT/PM), pre-PCH-split (single
 * IER/IIR/IMR bank), and PCH-split (separate north/south display banks).
 * Also prints per-ring IMR on gen6/7 and each ring's current seqno.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		/* Per-ring interrupt masks only exist on gen6/7. */
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
				   dev_priv->ring[i].name,
				   I915_READ_IMR(&dev_priv->ring[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->ring[i]);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
512
/*
 * debugfs: print the reserved/total fence register counts and describe
 * the object (if any) currently occupying each fence register.
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
540
/*
 * debugfs: hex-dump the hardware status page of the ring selected via
 * info_ent->data, four dwords per line.  Silently succeeds when the ring
 * has no status page mapped.
 */
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
562
e5c65260
CW
563static const char *ring_str(int ring)
564{
565 switch (ring) {
96154f2f
DV
566 case RCS: return "render";
567 case VCS: return "bsd";
568 case BCS: return "blt";
e5c65260
CW
569 default: return "";
570 }
571}
572
/* Error-state pin flag: " P" for positive pin count, " p" for negative. */
static const char *pin_flag(int pinned)
{
	if (pinned == 0)
		return "";

	return pinned > 0 ? " P" : " p";
}
582
583static const char *tiling_flag(int tiling)
584{
585 switch (tiling) {
586 default:
587 case I915_TILING_NONE: return "";
588 case I915_TILING_X: return " X";
589 case I915_TILING_Y: return " Y";
590 }
591}
592
/* Error-state dirty flag: " dirty" when set, empty otherwise. */
static const char *dirty_flag(int dirty)
{
	if (dirty)
		return " dirty";

	return "";
}
597
/* Error-state purgeable flag: " purgeable" when set, empty otherwise. */
static const char *purgeable_flag(int purgeable)
{
	if (purgeable)
		return " purgeable";

	return "";
}
602
/*
 * Print @count entries of a captured buffer-object array from the error
 * state: offset, size, domains, seqnos, status flags and optional
 * name/fence annotations, one line per buffer.
 */
static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}
634
/*
 * Print the per-ring portion of a captured GPU error state: command
 * streamer registers, plus gen-dependent extras (INSTDONE1/BBADDR on
 * gen4+ render, RC PSMI/fault/semaphore registers on gen6+) and the
 * software-side ring head/tail at capture time.
 */
static void i915_ring_error_state(struct seq_file *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	seq_printf(m, "%s command stream:\n", ring_str(ring));
	seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
		seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
	}
	if (INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		seq_printf(m, " SYNC_0: 0x%08x\n",
			   error->semaphore_mboxes[ring][0]);
		seq_printf(m, " SYNC_1: 0x%08x\n",
			   error->semaphore_mboxes[ring][1]);
	}
	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
	seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
669
/*
 * Per-open private data for the i915_error_state debugfs file; holds a
 * reference to the error state grabbed at open time (see
 * i915_error_state_open) so reads see a stable snapshot.
 */
struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};
674
/*
 * seq_file show callback for the i915_error_state debugfs file: dump the
 * whole captured GPU hang state — global registers, fence registers,
 * per-ring state, active/pinned buffer lists, batchbuffer and ringbuffer
 * contents, request lists, and any overlay/display error state.
 */
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	/* No hang captured (or it was cleared) — nothing to show. */
	if (!error) {
		seq_printf(m, "no error state collected\n");
		return 0;
	}

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "IER: 0x%08x\n", error->ier);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	seq_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		/* Hex-dump the batchbuffer captured for this ring, if any. */
		if ((obj = error->ring[i].batchbuffer)) {
			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			seq_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		/* Hex-dump the ringbuffer contents captured for this ring. */
		if ((obj = error->ring[i].ringbuffer)) {
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

	return 0;
}
6911a9b8 770
/*
 * Any write to the i915_error_state file clears the captured error
 * state; the written bytes themselves are ignored.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	mutex_lock(&dev->struct_mutex);
	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Claim the full count so userspace sees a successful write. */
	return cnt;
}
789
/*
 * Open hook for i915_error_state: snapshot a reference to the currently
 * captured error state under error_lock, so the seq_file show callback
 * sees consistent data even if a new hang is captured meanwhile.
 */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv *error_priv;
	unsigned long flags;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error_priv->error = dev_priv->first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return single_open(file, i915_error_state, error_priv);
}
811
/*
 * Release hook: drop the error-state reference taken at open time and
 * free the per-open private data.
 */
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;

	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
	kfree(error_priv);

	return single_release(inode, file);
}
823
/* File ops for i915_error_state: seq_file reads; any write clears the state. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = seq_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
832
/*
 * debugfs: read CRSTANDVID and print the two 6-bit standby delay fields
 * (high byte = with-context, low byte = without-context delay).
 */
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
853
/*
 * debugfs: report the current GPU frequency/P-state.  Gen5 (Ironlake)
 * decodes MEMSWCTL/MEMSTAT_ILK; gen6/7 dump the RPS state registers
 * (frequencies reported in 50 MHz units); other generations have no
 * P-state info to show.
 */
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		/* Hold forcewake across the reads so the GT well stays powered. */
		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
			   GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		/* RP_STATE_CAP packs RPN/RP1/RP0 ratios in successive bytes. */
		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}
938
/*
 * debugfs: dump the 16 PXVFREQ P-state/VID table registers (Ironlake
 * render standby frequency table).
 */
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
961
/* Convert a VID map value to millivolts: map 0 is 1250mV, 25mV per step down. */
static inline int MAP_TO_MV(int map)
{
	return 1250 - 25 * map;
}
966
967static int i915_inttoext_table(struct seq_file *m, void *unused)
968{
969 struct drm_info_node *node = (struct drm_info_node *) m->private;
970 struct drm_device *dev = node->minor->dev;
971 drm_i915_private_t *dev_priv = dev->dev_private;
972 u32 inttoext;
616fdb5a
BW
973 int ret, i;
974
975 ret = mutex_lock_interruptible(&dev->struct_mutex);
976 if (ret)
977 return ret;
f97108d1
JB
978
979 for (i = 1; i <= 32; i++) {
980 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
981 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
982 }
983
616fdb5a
BW
984 mutex_unlock(&dev->struct_mutex);
985
f97108d1
JB
986 return 0;
987}
988
4d85529d 989static int ironlake_drpc_info(struct seq_file *m)
f97108d1
JB
990{
991 struct drm_info_node *node = (struct drm_info_node *) m->private;
992 struct drm_device *dev = node->minor->dev;
993 drm_i915_private_t *dev_priv = dev->dev_private;
616fdb5a
BW
994 u32 rgvmodectl, rstdbyctl;
995 u16 crstandvid;
996 int ret;
997
998 ret = mutex_lock_interruptible(&dev->struct_mutex);
999 if (ret)
1000 return ret;
1001
1002 rgvmodectl = I915_READ(MEMMODECTL);
1003 rstdbyctl = I915_READ(RSTDBYCTL);
1004 crstandvid = I915_READ16(CRSTANDVID);
1005
1006 mutex_unlock(&dev->struct_mutex);
f97108d1
JB
1007
1008 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1009 "yes" : "no");
1010 seq_printf(m, "Boost freq: %d\n",
1011 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1012 MEMMODE_BOOST_FREQ_SHIFT);
1013 seq_printf(m, "HW control enabled: %s\n",
1014 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1015 seq_printf(m, "SW control enabled: %s\n",
1016 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1017 seq_printf(m, "Gated voltage change: %s\n",
1018 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1019 seq_printf(m, "Starting frequency: P%d\n",
1020 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
7648fa99 1021 seq_printf(m, "Max P-state: P%d\n",
f97108d1 1022 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
7648fa99
JB
1023 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1024 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1025 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1026 seq_printf(m, "Render standby enabled: %s\n",
1027 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
88271da3
JB
1028 seq_printf(m, "Current RS state: ");
1029 switch (rstdbyctl & RSX_STATUS_MASK) {
1030 case RSX_STATUS_ON:
1031 seq_printf(m, "on\n");
1032 break;
1033 case RSX_STATUS_RC1:
1034 seq_printf(m, "RC1\n");
1035 break;
1036 case RSX_STATUS_RC1E:
1037 seq_printf(m, "RC1E\n");
1038 break;
1039 case RSX_STATUS_RS1:
1040 seq_printf(m, "RS1\n");
1041 break;
1042 case RSX_STATUS_RS2:
1043 seq_printf(m, "RS2 (RC6)\n");
1044 break;
1045 case RSX_STATUS_RS3:
1046 seq_printf(m, "RC3 (RC6+)\n");
1047 break;
1048 default:
1049 seq_printf(m, "unknown\n");
1050 break;
1051 }
f97108d1
JB
1052
1053 return 0;
1054}
1055
4d85529d
BW
1056static int gen6_drpc_info(struct seq_file *m)
1057{
1058
1059 struct drm_info_node *node = (struct drm_info_node *) m->private;
1060 struct drm_device *dev = node->minor->dev;
1061 struct drm_i915_private *dev_priv = dev->dev_private;
1062 u32 rpmodectl1, gt_core_status, rcctl1;
93b525dc 1063 unsigned forcewake_count;
4d85529d
BW
1064 int count=0, ret;
1065
1066
1067 ret = mutex_lock_interruptible(&dev->struct_mutex);
1068 if (ret)
1069 return ret;
1070
93b525dc
DV
1071 spin_lock_irq(&dev_priv->gt_lock);
1072 forcewake_count = dev_priv->forcewake_count;
1073 spin_unlock_irq(&dev_priv->gt_lock);
1074
1075 if (forcewake_count) {
1076 seq_printf(m, "RC information inaccurate because somebody "
1077 "holds a forcewake reference \n");
4d85529d
BW
1078 } else {
1079 /* NB: we cannot use forcewake, else we read the wrong values */
1080 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1081 udelay(10);
1082 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1083 }
1084
1085 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1086 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
1087
1088 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1089 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1090 mutex_unlock(&dev->struct_mutex);
1091
1092 seq_printf(m, "Video Turbo Mode: %s\n",
1093 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1094 seq_printf(m, "HW control enabled: %s\n",
1095 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1096 seq_printf(m, "SW control enabled: %s\n",
1097 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1098 GEN6_RP_MEDIA_SW_MODE));
fff24e21 1099 seq_printf(m, "RC1e Enabled: %s\n",
4d85529d
BW
1100 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1101 seq_printf(m, "RC6 Enabled: %s\n",
1102 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1103 seq_printf(m, "Deep RC6 Enabled: %s\n",
1104 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1105 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1106 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1107 seq_printf(m, "Current RC state: ");
1108 switch (gt_core_status & GEN6_RCn_MASK) {
1109 case GEN6_RC0:
1110 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1111 seq_printf(m, "Core Power Down\n");
1112 else
1113 seq_printf(m, "on\n");
1114 break;
1115 case GEN6_RC3:
1116 seq_printf(m, "RC3\n");
1117 break;
1118 case GEN6_RC6:
1119 seq_printf(m, "RC6\n");
1120 break;
1121 case GEN6_RC7:
1122 seq_printf(m, "RC7\n");
1123 break;
1124 default:
1125 seq_printf(m, "Unknown\n");
1126 break;
1127 }
1128
1129 seq_printf(m, "Core Power Down: %s\n",
1130 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
cce66a28
BW
1131
1132 /* Not exactly sure what this is */
1133 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1134 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1135 seq_printf(m, "RC6 residency since boot: %u\n",
1136 I915_READ(GEN6_GT_GFX_RC6));
1137 seq_printf(m, "RC6+ residency since boot: %u\n",
1138 I915_READ(GEN6_GT_GFX_RC6p));
1139 seq_printf(m, "RC6++ residency since boot: %u\n",
1140 I915_READ(GEN6_GT_GFX_RC6pp));
1141
4d85529d
BW
1142 return 0;
1143}
1144
1145static int i915_drpc_info(struct seq_file *m, void *unused)
1146{
1147 struct drm_info_node *node = (struct drm_info_node *) m->private;
1148 struct drm_device *dev = node->minor->dev;
1149
1150 if (IS_GEN6(dev) || IS_GEN7(dev))
1151 return gen6_drpc_info(m);
1152 else
1153 return ironlake_drpc_info(m);
1154}
1155
b5e50c3f
JB
1156static int i915_fbc_status(struct seq_file *m, void *unused)
1157{
1158 struct drm_info_node *node = (struct drm_info_node *) m->private;
1159 struct drm_device *dev = node->minor->dev;
b5e50c3f 1160 drm_i915_private_t *dev_priv = dev->dev_private;
b5e50c3f 1161
ee5382ae 1162 if (!I915_HAS_FBC(dev)) {
b5e50c3f
JB
1163 seq_printf(m, "FBC unsupported on this chipset\n");
1164 return 0;
1165 }
1166
ee5382ae 1167 if (intel_fbc_enabled(dev)) {
b5e50c3f
JB
1168 seq_printf(m, "FBC enabled\n");
1169 } else {
1170 seq_printf(m, "FBC disabled: ");
1171 switch (dev_priv->no_fbc_reason) {
bed4a673
CW
1172 case FBC_NO_OUTPUT:
1173 seq_printf(m, "no outputs");
1174 break;
b5e50c3f
JB
1175 case FBC_STOLEN_TOO_SMALL:
1176 seq_printf(m, "not enough stolen memory");
1177 break;
1178 case FBC_UNSUPPORTED_MODE:
1179 seq_printf(m, "mode not supported");
1180 break;
1181 case FBC_MODE_TOO_LARGE:
1182 seq_printf(m, "mode too large");
1183 break;
1184 case FBC_BAD_PLANE:
1185 seq_printf(m, "FBC unsupported on plane");
1186 break;
1187 case FBC_NOT_TILED:
1188 seq_printf(m, "scanout buffer not tiled");
1189 break;
9c928d16
JB
1190 case FBC_MULTIPLE_PIPES:
1191 seq_printf(m, "multiple pipes are enabled");
1192 break;
c1a9f047
JB
1193 case FBC_MODULE_PARAM:
1194 seq_printf(m, "disabled per module param (default off)");
1195 break;
b5e50c3f
JB
1196 default:
1197 seq_printf(m, "unknown reason");
1198 }
1199 seq_printf(m, "\n");
1200 }
1201 return 0;
1202}
1203
4a9bef37
JB
1204static int i915_sr_status(struct seq_file *m, void *unused)
1205{
1206 struct drm_info_node *node = (struct drm_info_node *) m->private;
1207 struct drm_device *dev = node->minor->dev;
1208 drm_i915_private_t *dev_priv = dev->dev_private;
1209 bool sr_enabled = false;
1210
1398261a 1211 if (HAS_PCH_SPLIT(dev))
5ba2aaaa 1212 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
a6c45cf0 1213 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
4a9bef37
JB
1214 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1215 else if (IS_I915GM(dev))
1216 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1217 else if (IS_PINEVIEW(dev))
1218 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1219
5ba2aaaa
CW
1220 seq_printf(m, "self-refresh: %s\n",
1221 sr_enabled ? "enabled" : "disabled");
4a9bef37
JB
1222
1223 return 0;
1224}
1225
7648fa99
JB
1226static int i915_emon_status(struct seq_file *m, void *unused)
1227{
1228 struct drm_info_node *node = (struct drm_info_node *) m->private;
1229 struct drm_device *dev = node->minor->dev;
1230 drm_i915_private_t *dev_priv = dev->dev_private;
1231 unsigned long temp, chipset, gfx;
de227ef0
CW
1232 int ret;
1233
582be6b4
CW
1234 if (!IS_GEN5(dev))
1235 return -ENODEV;
1236
de227ef0
CW
1237 ret = mutex_lock_interruptible(&dev->struct_mutex);
1238 if (ret)
1239 return ret;
7648fa99
JB
1240
1241 temp = i915_mch_val(dev_priv);
1242 chipset = i915_chipset_val(dev_priv);
1243 gfx = i915_gfx_val(dev_priv);
de227ef0 1244 mutex_unlock(&dev->struct_mutex);
7648fa99
JB
1245
1246 seq_printf(m, "GMCH temp: %ld\n", temp);
1247 seq_printf(m, "Chipset power: %ld\n", chipset);
1248 seq_printf(m, "GFX power: %ld\n", gfx);
1249 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1250
1251 return 0;
1252}
1253
23b2f8bb
JB
1254static int i915_ring_freq_table(struct seq_file *m, void *unused)
1255{
1256 struct drm_info_node *node = (struct drm_info_node *) m->private;
1257 struct drm_device *dev = node->minor->dev;
1258 drm_i915_private_t *dev_priv = dev->dev_private;
1259 int ret;
1260 int gpu_freq, ia_freq;
1261
1c70c0ce 1262 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
23b2f8bb
JB
1263 seq_printf(m, "unsupported on this chipset\n");
1264 return 0;
1265 }
1266
1267 ret = mutex_lock_interruptible(&dev->struct_mutex);
1268 if (ret)
1269 return ret;
1270
1271 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
1272
1273 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
1274 gpu_freq++) {
1275 I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
1276 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
1277 GEN6_PCODE_READ_MIN_FREQ_TABLE);
1278 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
1279 GEN6_PCODE_READY) == 0, 10)) {
1280 DRM_ERROR("pcode read of freq table timed out\n");
1281 continue;
1282 }
1283 ia_freq = I915_READ(GEN6_PCODE_DATA);
1284 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
1285 }
1286
1287 mutex_unlock(&dev->struct_mutex);
1288
1289 return 0;
1290}
1291
7648fa99
JB
1292static int i915_gfxec(struct seq_file *m, void *unused)
1293{
1294 struct drm_info_node *node = (struct drm_info_node *) m->private;
1295 struct drm_device *dev = node->minor->dev;
1296 drm_i915_private_t *dev_priv = dev->dev_private;
616fdb5a
BW
1297 int ret;
1298
1299 ret = mutex_lock_interruptible(&dev->struct_mutex);
1300 if (ret)
1301 return ret;
7648fa99
JB
1302
1303 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1304
616fdb5a
BW
1305 mutex_unlock(&dev->struct_mutex);
1306
7648fa99
JB
1307 return 0;
1308}
1309
44834a67
CW
1310static int i915_opregion(struct seq_file *m, void *unused)
1311{
1312 struct drm_info_node *node = (struct drm_info_node *) m->private;
1313 struct drm_device *dev = node->minor->dev;
1314 drm_i915_private_t *dev_priv = dev->dev_private;
1315 struct intel_opregion *opregion = &dev_priv->opregion;
0d38f009 1316 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
44834a67
CW
1317 int ret;
1318
0d38f009
DV
1319 if (data == NULL)
1320 return -ENOMEM;
1321
44834a67
CW
1322 ret = mutex_lock_interruptible(&dev->struct_mutex);
1323 if (ret)
0d38f009 1324 goto out;
44834a67 1325
0d38f009
DV
1326 if (opregion->header) {
1327 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1328 seq_write(m, data, OPREGION_SIZE);
1329 }
44834a67
CW
1330
1331 mutex_unlock(&dev->struct_mutex);
1332
0d38f009
DV
1333out:
1334 kfree(data);
44834a67
CW
1335 return 0;
1336}
1337
37811fcc
CW
1338static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1339{
1340 struct drm_info_node *node = (struct drm_info_node *) m->private;
1341 struct drm_device *dev = node->minor->dev;
1342 drm_i915_private_t *dev_priv = dev->dev_private;
1343 struct intel_fbdev *ifbdev;
1344 struct intel_framebuffer *fb;
1345 int ret;
1346
1347 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1348 if (ret)
1349 return ret;
1350
1351 ifbdev = dev_priv->fbdev;
1352 fb = to_intel_framebuffer(ifbdev->helper.fb);
1353
1354 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
1355 fb->base.width,
1356 fb->base.height,
1357 fb->base.depth,
1358 fb->base.bits_per_pixel);
05394f39 1359 describe_obj(m, fb->obj);
37811fcc
CW
1360 seq_printf(m, "\n");
1361
1362 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1363 if (&fb->base == ifbdev->helper.fb)
1364 continue;
1365
1366 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
1367 fb->base.width,
1368 fb->base.height,
1369 fb->base.depth,
1370 fb->base.bits_per_pixel);
05394f39 1371 describe_obj(m, fb->obj);
37811fcc
CW
1372 seq_printf(m, "\n");
1373 }
1374
1375 mutex_unlock(&dev->mode_config.mutex);
1376
1377 return 0;
1378}
1379
e76d3630
BW
1380static int i915_context_status(struct seq_file *m, void *unused)
1381{
1382 struct drm_info_node *node = (struct drm_info_node *) m->private;
1383 struct drm_device *dev = node->minor->dev;
1384 drm_i915_private_t *dev_priv = dev->dev_private;
1385 int ret;
1386
1387 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1388 if (ret)
1389 return ret;
1390
dc501fbc
BW
1391 if (dev_priv->pwrctx) {
1392 seq_printf(m, "power context ");
1393 describe_obj(m, dev_priv->pwrctx);
1394 seq_printf(m, "\n");
1395 }
e76d3630 1396
dc501fbc
BW
1397 if (dev_priv->renderctx) {
1398 seq_printf(m, "render context ");
1399 describe_obj(m, dev_priv->renderctx);
1400 seq_printf(m, "\n");
1401 }
e76d3630
BW
1402
1403 mutex_unlock(&dev->mode_config.mutex);
1404
1405 return 0;
1406}
1407
6d794d42
BW
1408static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1409{
1410 struct drm_info_node *node = (struct drm_info_node *) m->private;
1411 struct drm_device *dev = node->minor->dev;
1412 struct drm_i915_private *dev_priv = dev->dev_private;
9f1f46a4 1413 unsigned forcewake_count;
6d794d42 1414
9f1f46a4
DV
1415 spin_lock_irq(&dev_priv->gt_lock);
1416 forcewake_count = dev_priv->forcewake_count;
1417 spin_unlock_irq(&dev_priv->gt_lock);
6d794d42 1418
9f1f46a4 1419 seq_printf(m, "forcewake count = %u\n", forcewake_count);
6d794d42
BW
1420
1421 return 0;
1422}
1423
ea16a3cd
DV
1424static const char *swizzle_string(unsigned swizzle)
1425{
1426 switch(swizzle) {
1427 case I915_BIT_6_SWIZZLE_NONE:
1428 return "none";
1429 case I915_BIT_6_SWIZZLE_9:
1430 return "bit9";
1431 case I915_BIT_6_SWIZZLE_9_10:
1432 return "bit9/bit10";
1433 case I915_BIT_6_SWIZZLE_9_11:
1434 return "bit9/bit11";
1435 case I915_BIT_6_SWIZZLE_9_10_11:
1436 return "bit9/bit10/bit11";
1437 case I915_BIT_6_SWIZZLE_9_17:
1438 return "bit9/bit17";
1439 case I915_BIT_6_SWIZZLE_9_10_17:
1440 return "bit9/bit10/bit17";
1441 case I915_BIT_6_SWIZZLE_UNKNOWN:
1442 return "unkown";
1443 }
1444
1445 return "bug";
1446}
1447
1448static int i915_swizzle_info(struct seq_file *m, void *data)
1449{
1450 struct drm_info_node *node = (struct drm_info_node *) m->private;
1451 struct drm_device *dev = node->minor->dev;
1452 struct drm_i915_private *dev_priv = dev->dev_private;
1453
1454 mutex_lock(&dev->struct_mutex);
1455 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1456 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1457 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1458 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1459
1460 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1461 seq_printf(m, "DDC = 0x%08x\n",
1462 I915_READ(DCC));
1463 seq_printf(m, "C0DRB3 = 0x%04x\n",
1464 I915_READ16(C0DRB3));
1465 seq_printf(m, "C1DRB3 = 0x%04x\n",
1466 I915_READ16(C1DRB3));
3fa7d235
DV
1467 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
1468 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1469 I915_READ(MAD_DIMM_C0));
1470 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1471 I915_READ(MAD_DIMM_C1));
1472 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1473 I915_READ(MAD_DIMM_C2));
1474 seq_printf(m, "TILECTL = 0x%08x\n",
1475 I915_READ(TILECTL));
1476 seq_printf(m, "ARB_MODE = 0x%08x\n",
1477 I915_READ(ARB_MODE));
1478 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1479 I915_READ(DISP_ARB_CTL));
ea16a3cd
DV
1480 }
1481 mutex_unlock(&dev->struct_mutex);
1482
1483 return 0;
1484}
1485
3cf17fc5
DV
1486static int i915_ppgtt_info(struct seq_file *m, void *data)
1487{
1488 struct drm_info_node *node = (struct drm_info_node *) m->private;
1489 struct drm_device *dev = node->minor->dev;
1490 struct drm_i915_private *dev_priv = dev->dev_private;
1491 struct intel_ring_buffer *ring;
1492 int i, ret;
1493
1494
1495 ret = mutex_lock_interruptible(&dev->struct_mutex);
1496 if (ret)
1497 return ret;
1498 if (INTEL_INFO(dev)->gen == 6)
1499 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1500
1501 for (i = 0; i < I915_NUM_RINGS; i++) {
1502 ring = &dev_priv->ring[i];
1503
1504 seq_printf(m, "%s\n", ring->name);
1505 if (INTEL_INFO(dev)->gen == 7)
1506 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
1507 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
1508 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
1509 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
1510 }
1511 if (dev_priv->mm.aliasing_ppgtt) {
1512 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1513
1514 seq_printf(m, "aliasing PPGTT:\n");
1515 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1516 }
1517 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1518 mutex_unlock(&dev->struct_mutex);
1519
1520 return 0;
1521}
1522
57f350b6
JB
1523static int i915_dpio_info(struct seq_file *m, void *data)
1524{
1525 struct drm_info_node *node = (struct drm_info_node *) m->private;
1526 struct drm_device *dev = node->minor->dev;
1527 struct drm_i915_private *dev_priv = dev->dev_private;
1528 int ret;
1529
1530
1531 if (!IS_VALLEYVIEW(dev)) {
1532 seq_printf(m, "unsupported\n");
1533 return 0;
1534 }
1535
1536 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1537 if (ret)
1538 return ret;
1539
1540 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1541
1542 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1543 intel_dpio_read(dev_priv, _DPIO_DIV_A));
1544 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1545 intel_dpio_read(dev_priv, _DPIO_DIV_B));
1546
1547 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1548 intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
1549 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1550 intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
1551
1552 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1553 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
1554 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1555 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
1556
1557 seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
1558 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
1559 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
1560 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
1561
1562 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1563 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
1564
1565 mutex_unlock(&dev->mode_config.mutex);
1566
1567 return 0;
1568}
1569
f3cd474b
CW
1570static ssize_t
1571i915_wedged_read(struct file *filp,
1572 char __user *ubuf,
1573 size_t max,
1574 loff_t *ppos)
1575{
1576 struct drm_device *dev = filp->private_data;
1577 drm_i915_private_t *dev_priv = dev->dev_private;
1578 char buf[80];
1579 int len;
1580
0206e353 1581 len = snprintf(buf, sizeof(buf),
f3cd474b
CW
1582 "wedged : %d\n",
1583 atomic_read(&dev_priv->mm.wedged));
1584
0206e353
AJ
1585 if (len > sizeof(buf))
1586 len = sizeof(buf);
f4433a8d 1587
f3cd474b
CW
1588 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1589}
1590
1591static ssize_t
1592i915_wedged_write(struct file *filp,
1593 const char __user *ubuf,
1594 size_t cnt,
1595 loff_t *ppos)
1596{
1597 struct drm_device *dev = filp->private_data;
f3cd474b
CW
1598 char buf[20];
1599 int val = 1;
1600
1601 if (cnt > 0) {
0206e353 1602 if (cnt > sizeof(buf) - 1)
f3cd474b
CW
1603 return -EINVAL;
1604
1605 if (copy_from_user(buf, ubuf, cnt))
1606 return -EFAULT;
1607 buf[cnt] = 0;
1608
1609 val = simple_strtoul(buf, NULL, 0);
1610 }
1611
1612 DRM_INFO("Manually setting wedged to %d\n", val);
527f9e90 1613 i915_handle_error(dev, val);
f3cd474b
CW
1614
1615 return cnt;
1616}
1617
1618static const struct file_operations i915_wedged_fops = {
1619 .owner = THIS_MODULE,
234e3405 1620 .open = simple_open,
f3cd474b
CW
1621 .read = i915_wedged_read,
1622 .write = i915_wedged_write,
6038f373 1623 .llseek = default_llseek,
f3cd474b
CW
1624};
1625
e5eb3d63
DV
1626static ssize_t
1627i915_ring_stop_read(struct file *filp,
1628 char __user *ubuf,
1629 size_t max,
1630 loff_t *ppos)
1631{
1632 struct drm_device *dev = filp->private_data;
1633 drm_i915_private_t *dev_priv = dev->dev_private;
1634 char buf[20];
1635 int len;
1636
1637 len = snprintf(buf, sizeof(buf),
1638 "0x%08x\n", dev_priv->stop_rings);
1639
1640 if (len > sizeof(buf))
1641 len = sizeof(buf);
1642
1643 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1644}
1645
1646static ssize_t
1647i915_ring_stop_write(struct file *filp,
1648 const char __user *ubuf,
1649 size_t cnt,
1650 loff_t *ppos)
1651{
1652 struct drm_device *dev = filp->private_data;
1653 struct drm_i915_private *dev_priv = dev->dev_private;
1654 char buf[20];
1655 int val = 0;
1656
1657 if (cnt > 0) {
1658 if (cnt > sizeof(buf) - 1)
1659 return -EINVAL;
1660
1661 if (copy_from_user(buf, ubuf, cnt))
1662 return -EFAULT;
1663 buf[cnt] = 0;
1664
1665 val = simple_strtoul(buf, NULL, 0);
1666 }
1667
1668 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
1669
1670 mutex_lock(&dev->struct_mutex);
1671 dev_priv->stop_rings = val;
1672 mutex_unlock(&dev->struct_mutex);
1673
1674 return cnt;
1675}
1676
1677static const struct file_operations i915_ring_stop_fops = {
1678 .owner = THIS_MODULE,
1679 .open = simple_open,
1680 .read = i915_ring_stop_read,
1681 .write = i915_ring_stop_write,
1682 .llseek = default_llseek,
1683};
d5442303 1684
358733e9
JB
1685static ssize_t
1686i915_max_freq_read(struct file *filp,
1687 char __user *ubuf,
1688 size_t max,
1689 loff_t *ppos)
1690{
1691 struct drm_device *dev = filp->private_data;
1692 drm_i915_private_t *dev_priv = dev->dev_private;
1693 char buf[80];
1694 int len;
1695
0206e353 1696 len = snprintf(buf, sizeof(buf),
358733e9
JB
1697 "max freq: %d\n", dev_priv->max_delay * 50);
1698
0206e353
AJ
1699 if (len > sizeof(buf))
1700 len = sizeof(buf);
358733e9
JB
1701
1702 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1703}
1704
1705static ssize_t
1706i915_max_freq_write(struct file *filp,
1707 const char __user *ubuf,
1708 size_t cnt,
1709 loff_t *ppos)
1710{
1711 struct drm_device *dev = filp->private_data;
1712 struct drm_i915_private *dev_priv = dev->dev_private;
1713 char buf[20];
1714 int val = 1;
1715
1716 if (cnt > 0) {
0206e353 1717 if (cnt > sizeof(buf) - 1)
358733e9
JB
1718 return -EINVAL;
1719
1720 if (copy_from_user(buf, ubuf, cnt))
1721 return -EFAULT;
1722 buf[cnt] = 0;
1723
1724 val = simple_strtoul(buf, NULL, 0);
1725 }
1726
1727 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1728
1729 /*
1730 * Turbo will still be enabled, but won't go above the set value.
1731 */
1732 dev_priv->max_delay = val / 50;
1733
1734 gen6_set_rps(dev, val / 50);
1735
1736 return cnt;
1737}
1738
1739static const struct file_operations i915_max_freq_fops = {
1740 .owner = THIS_MODULE,
234e3405 1741 .open = simple_open,
358733e9
JB
1742 .read = i915_max_freq_read,
1743 .write = i915_max_freq_write,
1744 .llseek = default_llseek,
1745};
1746
1523c310
JB
1747static ssize_t
1748i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1749 loff_t *ppos)
1750{
1751 struct drm_device *dev = filp->private_data;
1752 drm_i915_private_t *dev_priv = dev->dev_private;
1753 char buf[80];
1754 int len;
1755
1756 len = snprintf(buf, sizeof(buf),
1757 "min freq: %d\n", dev_priv->min_delay * 50);
1758
1759 if (len > sizeof(buf))
1760 len = sizeof(buf);
1761
1762 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1763}
1764
1765static ssize_t
1766i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1767 loff_t *ppos)
1768{
1769 struct drm_device *dev = filp->private_data;
1770 struct drm_i915_private *dev_priv = dev->dev_private;
1771 char buf[20];
1772 int val = 1;
1773
1774 if (cnt > 0) {
1775 if (cnt > sizeof(buf) - 1)
1776 return -EINVAL;
1777
1778 if (copy_from_user(buf, ubuf, cnt))
1779 return -EFAULT;
1780 buf[cnt] = 0;
1781
1782 val = simple_strtoul(buf, NULL, 0);
1783 }
1784
1785 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
1786
1787 /*
1788 * Turbo will still be enabled, but won't go below the set value.
1789 */
1790 dev_priv->min_delay = val / 50;
1791
1792 gen6_set_rps(dev, val / 50);
1793
1794 return cnt;
1795}
1796
1797static const struct file_operations i915_min_freq_fops = {
1798 .owner = THIS_MODULE,
1799 .open = simple_open,
1800 .read = i915_min_freq_read,
1801 .write = i915_min_freq_write,
1802 .llseek = default_llseek,
1803};
1804
07b7ddd9
JB
1805static ssize_t
1806i915_cache_sharing_read(struct file *filp,
1807 char __user *ubuf,
1808 size_t max,
1809 loff_t *ppos)
1810{
1811 struct drm_device *dev = filp->private_data;
1812 drm_i915_private_t *dev_priv = dev->dev_private;
1813 char buf[80];
1814 u32 snpcr;
1815 int len;
1816
1817 mutex_lock(&dev_priv->dev->struct_mutex);
1818 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1819 mutex_unlock(&dev_priv->dev->struct_mutex);
1820
0206e353 1821 len = snprintf(buf, sizeof(buf),
07b7ddd9
JB
1822 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
1823 GEN6_MBC_SNPCR_SHIFT);
1824
0206e353
AJ
1825 if (len > sizeof(buf))
1826 len = sizeof(buf);
07b7ddd9
JB
1827
1828 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1829}
1830
1831static ssize_t
1832i915_cache_sharing_write(struct file *filp,
1833 const char __user *ubuf,
1834 size_t cnt,
1835 loff_t *ppos)
1836{
1837 struct drm_device *dev = filp->private_data;
1838 struct drm_i915_private *dev_priv = dev->dev_private;
1839 char buf[20];
1840 u32 snpcr;
1841 int val = 1;
1842
1843 if (cnt > 0) {
0206e353 1844 if (cnt > sizeof(buf) - 1)
07b7ddd9
JB
1845 return -EINVAL;
1846
1847 if (copy_from_user(buf, ubuf, cnt))
1848 return -EFAULT;
1849 buf[cnt] = 0;
1850
1851 val = simple_strtoul(buf, NULL, 0);
1852 }
1853
1854 if (val < 0 || val > 3)
1855 return -EINVAL;
1856
1857 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
1858
1859 /* Update the cache sharing policy here as well */
1860 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1861 snpcr &= ~GEN6_MBC_SNPCR_MASK;
1862 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
1863 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
1864
1865 return cnt;
1866}
1867
1868static const struct file_operations i915_cache_sharing_fops = {
1869 .owner = THIS_MODULE,
234e3405 1870 .open = simple_open,
07b7ddd9
JB
1871 .read = i915_cache_sharing_read,
1872 .write = i915_cache_sharing_write,
1873 .llseek = default_llseek,
1874};
1875
f3cd474b
CW
1876/* As the drm_debugfs_init() routines are called before dev->dev_private is
1877 * allocated we need to hook into the minor for release. */
1878static int
1879drm_add_fake_info_node(struct drm_minor *minor,
1880 struct dentry *ent,
1881 const void *key)
1882{
1883 struct drm_info_node *node;
1884
1885 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
1886 if (node == NULL) {
1887 debugfs_remove(ent);
1888 return -ENOMEM;
1889 }
1890
1891 node->minor = minor;
1892 node->dent = ent;
1893 node->info_ent = (void *) key;
b3e067c0
MS
1894
1895 mutex_lock(&minor->debugfs_lock);
1896 list_add(&node->list, &minor->debugfs_list);
1897 mutex_unlock(&minor->debugfs_lock);
f3cd474b
CW
1898
1899 return 0;
1900}
1901
6d794d42
BW
1902static int i915_forcewake_open(struct inode *inode, struct file *file)
1903{
1904 struct drm_device *dev = inode->i_private;
1905 struct drm_i915_private *dev_priv = dev->dev_private;
1906 int ret;
1907
075edca4 1908 if (INTEL_INFO(dev)->gen < 6)
6d794d42
BW
1909 return 0;
1910
1911 ret = mutex_lock_interruptible(&dev->struct_mutex);
1912 if (ret)
1913 return ret;
1914 gen6_gt_force_wake_get(dev_priv);
1915 mutex_unlock(&dev->struct_mutex);
1916
1917 return 0;
1918}
1919
c43b5634 1920static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42
BW
1921{
1922 struct drm_device *dev = inode->i_private;
1923 struct drm_i915_private *dev_priv = dev->dev_private;
1924
075edca4 1925 if (INTEL_INFO(dev)->gen < 6)
6d794d42
BW
1926 return 0;
1927
1928 /*
1929 * It's bad that we can potentially hang userspace if struct_mutex gets
1930 * forever stuck. However, if we cannot acquire this lock it means that
1931 * almost certainly the driver has hung, is not unload-able. Therefore
1932 * hanging here is probably a minor inconvenience not to be seen my
1933 * almost every user.
1934 */
1935 mutex_lock(&dev->struct_mutex);
1936 gen6_gt_force_wake_put(dev_priv);
1937 mutex_unlock(&dev->struct_mutex);
1938
1939 return 0;
1940}
1941
1942static const struct file_operations i915_forcewake_fops = {
1943 .owner = THIS_MODULE,
1944 .open = i915_forcewake_open,
1945 .release = i915_forcewake_release,
1946};
1947
1948static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
1949{
1950 struct drm_device *dev = minor->dev;
1951 struct dentry *ent;
1952
1953 ent = debugfs_create_file("i915_forcewake_user",
8eb57294 1954 S_IRUSR,
6d794d42
BW
1955 root, dev,
1956 &i915_forcewake_fops);
1957 if (IS_ERR(ent))
1958 return PTR_ERR(ent);
1959
8eb57294 1960 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
6d794d42
BW
1961}
1962
6a9c308d
DV
1963static int i915_debugfs_create(struct dentry *root,
1964 struct drm_minor *minor,
1965 const char *name,
1966 const struct file_operations *fops)
07b7ddd9
JB
1967{
1968 struct drm_device *dev = minor->dev;
1969 struct dentry *ent;
1970
6a9c308d 1971 ent = debugfs_create_file(name,
07b7ddd9
JB
1972 S_IRUGO | S_IWUSR,
1973 root, dev,
6a9c308d 1974 fops);
07b7ddd9
JB
1975 if (IS_ERR(ent))
1976 return PTR_ERR(ent);
1977
6a9c308d 1978 return drm_add_fake_info_node(minor, ent, fops);
07b7ddd9
JB
1979}
1980
/*
 * Read-only debugfs files handled through the generic drm_info_list
 * mechanism (drm_debugfs_create_files/drm_debugfs_remove_files).
 * The last field, when set, is passed to the show function as
 * node->info_ent->data to select a variant: which object list to walk
 * (ACTIVE_LIST/INACTIVE_LIST/PINNED_LIST) or which ring's HWS page to
 * dump (RCS/BCS/VCS).
 */
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	/* Same show function as i915_gem_gtt, filtered to pinned objects. */
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	/* Hardware status pages, one file per ring. */
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 2015
27c202ad 2016int i915_debugfs_init(struct drm_minor *minor)
2017263e 2017{
f3cd474b
CW
2018 int ret;
2019
6a9c308d
DV
2020 ret = i915_debugfs_create(minor->debugfs_root, minor,
2021 "i915_wedged",
2022 &i915_wedged_fops);
f3cd474b
CW
2023 if (ret)
2024 return ret;
2025
6d794d42 2026 ret = i915_forcewake_create(minor->debugfs_root, minor);
358733e9
JB
2027 if (ret)
2028 return ret;
6a9c308d
DV
2029
2030 ret = i915_debugfs_create(minor->debugfs_root, minor,
2031 "i915_max_freq",
2032 &i915_max_freq_fops);
07b7ddd9
JB
2033 if (ret)
2034 return ret;
6a9c308d 2035
1523c310
JB
2036 ret = i915_debugfs_create(minor->debugfs_root, minor,
2037 "i915_min_freq",
2038 &i915_min_freq_fops);
2039 if (ret)
2040 return ret;
2041
6a9c308d
DV
2042 ret = i915_debugfs_create(minor->debugfs_root, minor,
2043 "i915_cache_sharing",
2044 &i915_cache_sharing_fops);
6d794d42
BW
2045 if (ret)
2046 return ret;
e5eb3d63
DV
2047 ret = i915_debugfs_create(minor->debugfs_root, minor,
2048 "i915_ring_stop",
2049 &i915_ring_stop_fops);
2050 if (ret)
2051 return ret;
6d794d42 2052
d5442303
DV
2053 ret = i915_debugfs_create(minor->debugfs_root, minor,
2054 "i915_error_state",
2055 &i915_error_state_fops);
2056 if (ret)
2057 return ret;
2058
27c202ad
BG
2059 return drm_debugfs_create_files(i915_debugfs_list,
2060 I915_DEBUGFS_ENTRIES,
2017263e
BG
2061 minor->debugfs_root, minor);
2062}
2063
27c202ad 2064void i915_debugfs_cleanup(struct drm_minor *minor)
2017263e 2065{
27c202ad
BG
2066 drm_debugfs_remove_files(i915_debugfs_list,
2067 I915_DEBUGFS_ENTRIES, minor);
6d794d42
BW
2068 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
2069 1, minor);
33db679b
KH
2070 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
2071 1, minor);
358733e9
JB
2072 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
2073 1, minor);
1523c310
JB
2074 drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
2075 1, minor);
07b7ddd9
JB
2076 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
2077 1, minor);
e5eb3d63
DV
2078 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
2079 1, minor);
6bd459df
DV
2080 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
2081 1, minor);
2017263e
BG
2082}
2083
2084#endif /* CONFIG_DEBUG_FS */
This page took 0.317983 seconds and 5 git commands to generate.