drm: micro optimise cache flushing
[deliverable/linux.git] drivers/gpu/drm/i915/i915_debugfs.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
	DEV_INFO_FLAGS;
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

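/*
 * Print a one-line summary of a GEM object: pin flag (P = user pinned,
 * p = kernel pinned), tiling (X/Y), size, read/write domains, the last
 * read/write/fence seqnos and cache level, plus name, pin count, fence
 * register, GTT placement and mappability where applicable.
 */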
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

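/*
 * Tally object count and GTT size over a list, tracking the mappable
 * subset separately.  Expects obj, size, count, mappable_size and
 * mappable_count in the calling scope.
 */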
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)

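/*
 * Summarise GEM memory usage: totals for bound, active, inactive and
 * unbound objects, plus the purgeable, pinned-mappable and
 * fault-mappable subsets, followed by total and mappable GTT sizes.
 */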
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%zu] gtt total\n",
		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

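/*
 * Report the page-flip state of every CRTC: whether a flip is queued or
 * pending on vblank, the stall-check state, and the GTT offsets of the
 * old and pending framebuffers.
 */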
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
		seq_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[RCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
		seq_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[VCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
		seq_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[BCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
				   dev_priv->ring[i].name,
				   I915_READ_IMR(&dev_priv->ring[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->ring[i]);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

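/*
 * Hex dump of the hardware status page for the ring selected via
 * info_ent->data.  Only the first 1KiB (256 dwords) is printed, four
 * dwords per line.
 */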
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}

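/*
 * Dump the per-ring registers captured at hang time: head/tail
 * pointers, instruction and fault addresses, semaphore mailboxes on
 * gen6+, the last seqno and the software ring pointers.
 */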
static void i915_ring_error_state(struct seq_file *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	seq_printf(m, "%s command stream:\n", ring_str(ring));
	seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);

	if (INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		seq_printf(m, " SYNC_0: 0x%08x\n",
			   error->semaphore_mboxes[ring][0]);
		seq_printf(m, " SYNC_1: 0x%08x\n",
			   error->semaphore_mboxes[ring][1]);
	}
	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
	seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

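/*
 * Top-level dump of the most recently captured GPU error state: global
 * registers, fences, per-ring state, the active/pinned buffer lists,
 * batchbuffer and ringbuffer contents, and any overlay/display state.
 */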
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	if (!error) {
		seq_printf(m, "no error state collected\n");
		return 0;
	}

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "IER: 0x%08x\n", error->ier);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	seq_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			seq_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

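/*
 * Opening i915_error_state takes a kref on the current error state so
 * it survives for the lifetime of the open file; writing anything to
 * the node discards the captured state.
 */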
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv *error_priv;
	unsigned long flags;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error_priv->error = dev_priv->first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return single_open(file, i915_error_state, error_priv);
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;

	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
	kfree(error_priv);

	return single_release(inode, file);
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = seq_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
			   GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}

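/*
 * Report the gen6+ render C-state.  The registers are deliberately read
 * without taking forcewake, since waking the GT would perturb the very
 * state being reported; the output notes whether the values can be
 * trusted.
 */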
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_printf(m, "Core Power Down\n");
		else
			seq_printf(m, "on\n");
		break;
	case GEN6_RC3:
		seq_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_printf(m, "RC7\n");
		break;
	default:
		seq_printf(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

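/*
 * Walk the RPS range and, for each GPU frequency step, query the pcode
 * mailbox for the matching effective CPU (IA) frequency.  GPU steps are
 * in units of 50MHz, the IA reply in units of 100MHz.
 */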
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode read of freq table timed out\n");
			continue;
		}
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

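/*
 * Dump a raw snapshot of the ACPI OpRegion: the region is copied out of
 * I/O memory into a scratch buffer before being written to the seq
 * file.
 */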
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	/* Propagate a failed interruptible lock instead of swallowing it. */
	return ret;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

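/*
 * Describe which address bits the hardware XORs into bit 6 when
 * swizzling tiled surfaces, as probed at init time.
 */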
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

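/*
 * Dump the per-ring page-directory registers and, when an aliasing
 * PPGTT is in use, its page-directory offset in the GTT.
 */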
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ring = &dev_priv->ring[i];

		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_printf(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_printf(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

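/*
 * i915_wedged: reading reports whether the GPU is marked wedged;
 * writing a value hands it to i915_handle_error(), which is useful for
 * exercising the hang detection and reset paths.
 */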
static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "wedged : %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};

static ssize_t
i915_ring_stop_read(struct file *filp,
		    char __user *ubuf,
		    size_t max,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[20];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "0x%08x\n", dev_priv->stop_rings);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_ring_stop_write(struct file *filp,
		     const char __user *ubuf,
		     size_t cnt,
		     loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 0, ret;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static const struct file_operations i915_ring_stop_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_ring_stop_read,
	.write = i915_ring_stop_write,
	.llseek = default_llseek,
};

static ssize_t
i915_max_freq_read(struct file *filp,
		   char __user *ubuf,
		   size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len, ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	len = snprintf(buf, sizeof(buf),
		       "max freq: %d\n", dev_priv->rps.max_delay * 50);
	mutex_unlock(&dev->struct_mutex);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
		    const char __user *ubuf,
		    size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1, ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->rps.max_delay = val / 50;

	gen6_set_rps(dev, val / 50);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static const struct file_operations i915_max_freq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_max_freq_read,
	.write = i915_max_freq_write,
	.llseek = default_llseek,
};

static ssize_t
i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len, ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	len = snprintf(buf, sizeof(buf),
		       "min freq: %d\n", dev_priv->rps.min_delay * 50);
	mutex_unlock(&dev->struct_mutex);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1, ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	dev_priv->rps.min_delay = val / 50;

	gen6_set_rps(dev, val / 50);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static const struct file_operations i915_min_freq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_min_freq_read,
	.write = i915_min_freq_write,
	.llseek = default_llseek,
};

static ssize_t
i915_cache_sharing_read(struct file *filp,
			char __user *ubuf,
			size_t max,
			loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
	int len, ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	len = snprintf(buf, sizeof(buf),
		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
		       GEN6_MBC_SNPCR_SHIFT);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

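/*
 * Update the uncore cache-sharing policy via the SNPCR field of
 * GEN6_MBCUNIT_SNPCR; only values 0-3 are accepted.
 */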
1903 static ssize_t
1904 i915_cache_sharing_write(struct file *filp,
1905 const char __user *ubuf,
1906 size_t cnt,
1907 loff_t *ppos)
1908 {
1909 struct drm_device *dev = filp->private_data;
1910 struct drm_i915_private *dev_priv = dev->dev_private;
1911 char buf[20];
1912 u32 snpcr;
1913 int val = 1;
1914
1915 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1916 return -ENODEV;
1917
1918 if (cnt > 0) {
1919 if (cnt > sizeof(buf) - 1)
1920 return -EINVAL;
1921
1922 if (copy_from_user(buf, ubuf, cnt))
1923 return -EFAULT;
1924 buf[cnt] = 0;
1925
1926 val = simple_strtoul(buf, NULL, 0);
1927 }
1928
1929 if (val < 0 || val > 3)
1930 return -EINVAL;
1931
1932 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
1933
1934 /* Update the cache sharing policy here as well */
1935 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1936 snpcr &= ~GEN6_MBC_SNPCR_MASK;
1937 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
1938 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
1939
1940 return cnt;
1941 }

static const struct file_operations i915_cache_sharing_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,
	.llseek = default_llseek,
};
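
/*
 * Example (a sketch; the path assumes the usual debugfs mount point):
 *
 *	# echo 2 > /sys/kernel/debug/dri/0/i915_cache_sharing
 *
 * Only values 0-3 are accepted, matching the width of the MBC snoop
 * control field; what each policy means is hardware-specific.
 */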

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release; the node is keyed
 * by the caller-supplied pointer so that i915_debugfs_cleanup() can find
 * it again. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

/*
 * Holding i915_forcewake_user open keeps a forcewake reference, which
 * prevents the GT from power gating while its registers are inspected.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
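
/*
 * The forcewake reference lives for as long as the file is held open, so a
 * typical use is (a sketch, with the usual debugfs path assumed):
 *
 *	# exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *	... read GT registers while they are guaranteed awake ...
 *	# exec 3<&-
 */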

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
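
/*
 * Note that i915_forcewake_user is created with S_IRUSR only: merely
 * opening it changes GPU power behaviour, so it stays root-only, unlike
 * the S_IRUGO | S_IWUSR knobs created by i915_debugfs_create() below.
 */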

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}

static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
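
/*
 * Everything in i915_debugfs_list is a plain seq_file dump registered in
 * one go via drm_debugfs_create_files(); the read/write knobs above need
 * their own file_operations and are therefore added one by one in
 * i915_debugfs_init() through the fake-info-node helpers.
 */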

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_wedged",
				  &i915_wedged_fops);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_max_freq",
				  &i915_max_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_min_freq",
				  &i915_min_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_cache_sharing",
				  &i915_cache_sharing_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_ring_stop",
				  &i915_ring_stop_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_error_state",
				  &i915_error_state_fops);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	/*
	 * The fake info nodes were keyed on their fops pointers by
	 * drm_add_fake_info_node(), so passing each fops cast to a
	 * struct drm_info_list matches node->info_ent for removal.
	 */
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */