/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)
enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
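/*
 * Everything below is compiled only when debugfs support is enabled.  The
 * read-only info nodes and writable control files registered at the bottom
 * of this file are typically exposed under /sys/kernel/debug/dri/<minor>/
 * (assuming debugfs is mounted in the usual place).
 */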
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
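/*
 * Illustrative output only (the exact flag list is generated by
 * DEV_INFO_FOR_EACH_FLAG and varies with the device and kernel):
 *
 *   $ cat /sys/kernel/debug/dri/0/i915_capabilities
 *   gen: 7
 *   pch: 1
 *   is_mobile: yes
 *   has_llc: yes
 *   ...
 */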
/* "P" = pinned by userspace, "p" = pinned by the kernel */
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}
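/*
 * describe_obj() emits a single line per GEM object: pointer, pin and
 * tiling flags, size in KiB, read/write domains, last read/write/fence
 * seqnos and cache level, followed by optional annotations (name, pin
 * count, fence register, GTT offset/size, stolen offset, mappability and
 * the last ring that used the object).
 */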
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (i915_gem_obj_ggtt_bound(obj))
		seq_printf(m, " (gtt offset: %08lx, size: %08x)",
			   i915_gem_obj_ggtt_offset(obj),
			   (unsigned int)i915_gem_obj_ggtt_size(obj));
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;

		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};
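/*
 * per_file_stats() is the idr_for_each() callback used by
 * i915_gem_object_info() below: it walks a client's object_idr and
 * accumulates per-client totals into a struct file_stats.
 */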
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
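/*
 * The resulting i915_gem_objects file starts with the global object and
 * byte counts, then prints one summary line per open DRM client,
 * identified by the owning task's comm.
 */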
302 static int i915_gem_gtt_info(struct seq_file
*m
, void *data
)
304 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
305 struct drm_device
*dev
= node
->minor
->dev
;
306 uintptr_t list
= (uintptr_t) node
->info_ent
->data
;
307 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
308 struct drm_i915_gem_object
*obj
;
309 size_t total_obj_size
, total_gtt_size
;
312 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
316 total_obj_size
= total_gtt_size
= count
= 0;
317 list_for_each_entry(obj
, &dev_priv
->mm
.bound_list
, global_list
) {
318 if (list
== PINNED_LIST
&& obj
->pin_count
== 0)
322 describe_obj(m
, obj
);
324 total_obj_size
+= obj
->base
.size
;
325 total_gtt_size
+= i915_gem_obj_ggtt_size(obj
);
329 mutex_unlock(&dev
->struct_mutex
);
331 seq_printf(m
, "Total %d objects, %zu bytes, %zu GTT size\n",
332 count
, total_obj_size
, total_gtt_size
);
337 static int i915_gem_pageflip_info(struct seq_file
*m
, void *data
)
339 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
340 struct drm_device
*dev
= node
->minor
->dev
;
342 struct intel_crtc
*crtc
;
344 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, base
.head
) {
345 const char pipe
= pipe_name(crtc
->pipe
);
346 const char plane
= plane_name(crtc
->plane
);
347 struct intel_unpin_work
*work
;
349 spin_lock_irqsave(&dev
->event_lock
, flags
);
350 work
= crtc
->unpin_work
;
352 seq_printf(m
, "No flip due on pipe %c (plane %c)\n",
355 if (atomic_read(&work
->pending
) < INTEL_FLIP_COMPLETE
) {
356 seq_printf(m
, "Flip queued on pipe %c (plane %c)\n",
359 seq_printf(m
, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
362 if (work
->enable_stall_check
)
363 seq_puts(m
, "Stall check enabled, ");
365 seq_puts(m
, "Stall check waiting for page flip ioctl, ");
366 seq_printf(m
, "%d prepares\n", atomic_read(&work
->pending
));
368 if (work
->old_fb_obj
) {
369 struct drm_i915_gem_object
*obj
= work
->old_fb_obj
;
371 seq_printf(m
, "Old framebuffer gtt_offset 0x%08lx\n",
372 i915_gem_obj_ggtt_offset(obj
));
374 if (work
->pending_flip_obj
) {
375 struct drm_i915_gem_object
*obj
= work
->pending_flip_obj
;
377 seq_printf(m
, "New framebuffer gtt_offset 0x%08lx\n",
378 i915_gem_obj_ggtt_offset(obj
));
381 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
387 static int i915_gem_request_info(struct seq_file
*m
, void *data
)
389 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
390 struct drm_device
*dev
= node
->minor
->dev
;
391 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
392 struct intel_ring_buffer
*ring
;
393 struct drm_i915_gem_request
*gem_request
;
396 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
401 for_each_ring(ring
, dev_priv
, i
) {
402 if (list_empty(&ring
->request_list
))
405 seq_printf(m
, "%s requests:\n", ring
->name
);
406 list_for_each_entry(gem_request
,
409 seq_printf(m
, " %d @ %d\n",
411 (int) (jiffies
- gem_request
->emitted_jiffies
));
415 mutex_unlock(&dev
->struct_mutex
);
418 seq_puts(m
, "No requests\n");
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
432 static int i915_gem_seqno_info(struct seq_file
*m
, void *data
)
434 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
435 struct drm_device
*dev
= node
->minor
->dev
;
436 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
437 struct intel_ring_buffer
*ring
;
440 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
444 for_each_ring(ring
, dev_priv
, i
)
445 i915_ring_seqno_info(m
, ring
);
447 mutex_unlock(&dev
->struct_mutex
);
453 static int i915_interrupt_info(struct seq_file
*m
, void *data
)
455 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
456 struct drm_device
*dev
= node
->minor
->dev
;
457 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
458 struct intel_ring_buffer
*ring
;
461 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
465 if (IS_VALLEYVIEW(dev
)) {
466 seq_printf(m
, "Display IER:\t%08x\n",
468 seq_printf(m
, "Display IIR:\t%08x\n",
470 seq_printf(m
, "Display IIR_RW:\t%08x\n",
471 I915_READ(VLV_IIR_RW
));
472 seq_printf(m
, "Display IMR:\t%08x\n",
475 seq_printf(m
, "Pipe %c stat:\t%08x\n",
477 I915_READ(PIPESTAT(pipe
)));
479 seq_printf(m
, "Master IER:\t%08x\n",
480 I915_READ(VLV_MASTER_IER
));
482 seq_printf(m
, "Render IER:\t%08x\n",
484 seq_printf(m
, "Render IIR:\t%08x\n",
486 seq_printf(m
, "Render IMR:\t%08x\n",
489 seq_printf(m
, "PM IER:\t\t%08x\n",
490 I915_READ(GEN6_PMIER
));
491 seq_printf(m
, "PM IIR:\t\t%08x\n",
492 I915_READ(GEN6_PMIIR
));
493 seq_printf(m
, "PM IMR:\t\t%08x\n",
494 I915_READ(GEN6_PMIMR
));
496 seq_printf(m
, "Port hotplug:\t%08x\n",
497 I915_READ(PORT_HOTPLUG_EN
));
498 seq_printf(m
, "DPFLIPSTAT:\t%08x\n",
499 I915_READ(VLV_DPFLIPSTAT
));
500 seq_printf(m
, "DPINVGTT:\t%08x\n",
501 I915_READ(DPINVGTT
));
503 } else if (!HAS_PCH_SPLIT(dev
)) {
504 seq_printf(m
, "Interrupt enable: %08x\n",
506 seq_printf(m
, "Interrupt identity: %08x\n",
508 seq_printf(m
, "Interrupt mask: %08x\n",
511 seq_printf(m
, "Pipe %c stat: %08x\n",
513 I915_READ(PIPESTAT(pipe
)));
515 seq_printf(m
, "North Display Interrupt enable: %08x\n",
517 seq_printf(m
, "North Display Interrupt identity: %08x\n",
519 seq_printf(m
, "North Display Interrupt mask: %08x\n",
521 seq_printf(m
, "South Display Interrupt enable: %08x\n",
523 seq_printf(m
, "South Display Interrupt identity: %08x\n",
525 seq_printf(m
, "South Display Interrupt mask: %08x\n",
527 seq_printf(m
, "Graphics Interrupt enable: %08x\n",
529 seq_printf(m
, "Graphics Interrupt identity: %08x\n",
531 seq_printf(m
, "Graphics Interrupt mask: %08x\n",
534 seq_printf(m
, "Interrupts received: %d\n",
535 atomic_read(&dev_priv
->irq_received
));
536 for_each_ring(ring
, dev_priv
, i
) {
537 if (IS_GEN6(dev
) || IS_GEN7(dev
)) {
539 "Graphics Interrupt mask (%s): %08x\n",
540 ring
->name
, I915_READ_IMR(ring
));
542 i915_ring_seqno_info(m
, ring
);
544 mutex_unlock(&dev
->struct_mutex
);
549 static int i915_gem_fence_regs_info(struct seq_file
*m
, void *data
)
551 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
552 struct drm_device
*dev
= node
->minor
->dev
;
553 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
556 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
560 seq_printf(m
, "Reserved fences = %d\n", dev_priv
->fence_reg_start
);
561 seq_printf(m
, "Total fences = %d\n", dev_priv
->num_fence_regs
);
562 for (i
= 0; i
< dev_priv
->num_fence_regs
; i
++) {
563 struct drm_i915_gem_object
*obj
= dev_priv
->fence_regs
[i
].obj
;
565 seq_printf(m
, "Fence %d, pin count = %d, object = ",
566 i
, dev_priv
->fence_regs
[i
].pin_count
);
568 seq_puts(m
, "unused");
570 describe_obj(m
, obj
);
574 mutex_unlock(&dev
->struct_mutex
);
578 static int i915_hws_info(struct seq_file
*m
, void *data
)
580 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
581 struct drm_device
*dev
= node
->minor
->dev
;
582 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
583 struct intel_ring_buffer
*ring
;
587 ring
= &dev_priv
->ring
[(uintptr_t)node
->info_ent
->data
];
588 hws
= ring
->status_page
.page_addr
;
592 for (i
= 0; i
< 4096 / sizeof(u32
) / 4; i
+= 4) {
593 seq_printf(m
, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
595 hws
[i
], hws
[i
+ 1], hws
[i
+ 2], hws
[i
+ 3]);
601 i915_error_state_write(struct file
*filp
,
602 const char __user
*ubuf
,
606 struct i915_error_state_file_priv
*error_priv
= filp
->private_data
;
607 struct drm_device
*dev
= error_priv
->dev
;
610 DRM_DEBUG_DRIVER("Resetting error state\n");
612 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
616 i915_destroy_error_state(dev
);
617 mutex_unlock(&dev
->struct_mutex
);
622 static int i915_error_state_open(struct inode
*inode
, struct file
*file
)
624 struct drm_device
*dev
= inode
->i_private
;
625 struct i915_error_state_file_priv
*error_priv
;
627 error_priv
= kzalloc(sizeof(*error_priv
), GFP_KERNEL
);
631 error_priv
->dev
= dev
;
633 i915_error_state_get(dev
, error_priv
);
635 file
->private_data
= error_priv
;
640 static int i915_error_state_release(struct inode
*inode
, struct file
*file
)
642 struct i915_error_state_file_priv
*error_priv
= file
->private_data
;
644 i915_error_state_put(error_priv
);
650 static ssize_t
i915_error_state_read(struct file
*file
, char __user
*userbuf
,
651 size_t count
, loff_t
*pos
)
653 struct i915_error_state_file_priv
*error_priv
= file
->private_data
;
654 struct drm_i915_error_state_buf error_str
;
656 ssize_t ret_count
= 0;
659 ret
= i915_error_state_buf_init(&error_str
, count
, *pos
);
663 ret
= i915_error_state_to_str(&error_str
, error_priv
);
667 ret_count
= simple_read_from_buffer(userbuf
, count
, &tmp_pos
,
674 *pos
= error_str
.start
+ ret_count
;
676 i915_error_state_buf_release(&error_str
);
677 return ret
?: ret_count
;
680 static const struct file_operations i915_error_state_fops
= {
681 .owner
= THIS_MODULE
,
682 .open
= i915_error_state_open
,
683 .read
= i915_error_state_read
,
684 .write
= i915_error_state_write
,
685 .llseek
= default_llseek
,
686 .release
= i915_error_state_release
,
690 i915_next_seqno_get(void *data
, u64
*val
)
692 struct drm_device
*dev
= data
;
693 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
696 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
700 *val
= dev_priv
->next_seqno
;
701 mutex_unlock(&dev
->struct_mutex
);
707 i915_next_seqno_set(void *data
, u64 val
)
709 struct drm_device
*dev
= data
;
712 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
716 ret
= i915_gem_set_seqno(dev
, val
);
717 mutex_unlock(&dev
->struct_mutex
);
722 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops
,
723 i915_next_seqno_get
, i915_next_seqno_set
,
726 static int i915_rstdby_delays(struct seq_file
*m
, void *unused
)
728 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
729 struct drm_device
*dev
= node
->minor
->dev
;
730 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
734 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
738 crstanddelay
= I915_READ16(CRSTANDVID
);
740 mutex_unlock(&dev
->struct_mutex
);
742 seq_printf(m
, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay
>> 8) & 0x3f, (crstanddelay
& 0x3f));
747 static int i915_cur_delayinfo(struct seq_file
*m
, void *unused
)
749 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
750 struct drm_device
*dev
= node
->minor
->dev
;
751 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
755 u16 rgvswctl
= I915_READ16(MEMSWCTL
);
756 u16 rgvstat
= I915_READ16(MEMSTAT_ILK
);
758 seq_printf(m
, "Requested P-state: %d\n", (rgvswctl
>> 8) & 0xf);
759 seq_printf(m
, "Requested VID: %d\n", rgvswctl
& 0x3f);
760 seq_printf(m
, "Current VID: %d\n", (rgvstat
& MEMSTAT_VID_MASK
) >>
762 seq_printf(m
, "Current P-state: %d\n",
763 (rgvstat
& MEMSTAT_PSTATE_MASK
) >> MEMSTAT_PSTATE_SHIFT
);
764 } else if ((IS_GEN6(dev
) || IS_GEN7(dev
)) && !IS_VALLEYVIEW(dev
)) {
765 u32 gt_perf_status
= I915_READ(GEN6_GT_PERF_STATUS
);
766 u32 rp_state_limits
= I915_READ(GEN6_RP_STATE_LIMITS
);
767 u32 rp_state_cap
= I915_READ(GEN6_RP_STATE_CAP
);
769 u32 rpupei
, rpcurup
, rpprevup
;
770 u32 rpdownei
, rpcurdown
, rpprevdown
;
773 /* RPSTAT1 is in the GT power well */
774 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
778 gen6_gt_force_wake_get(dev_priv
);
780 rpstat
= I915_READ(GEN6_RPSTAT1
);
781 rpupei
= I915_READ(GEN6_RP_CUR_UP_EI
);
782 rpcurup
= I915_READ(GEN6_RP_CUR_UP
);
783 rpprevup
= I915_READ(GEN6_RP_PREV_UP
);
784 rpdownei
= I915_READ(GEN6_RP_CUR_DOWN_EI
);
785 rpcurdown
= I915_READ(GEN6_RP_CUR_DOWN
);
786 rpprevdown
= I915_READ(GEN6_RP_PREV_DOWN
);
788 cagf
= (rpstat
& HSW_CAGF_MASK
) >> HSW_CAGF_SHIFT
;
790 cagf
= (rpstat
& GEN6_CAGF_MASK
) >> GEN6_CAGF_SHIFT
;
791 cagf
*= GT_FREQUENCY_MULTIPLIER
;
793 gen6_gt_force_wake_put(dev_priv
);
794 mutex_unlock(&dev
->struct_mutex
);
796 seq_printf(m
, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status
);
797 seq_printf(m
, "RPSTAT1: 0x%08x\n", rpstat
);
798 seq_printf(m
, "Render p-state ratio: %d\n",
799 (gt_perf_status
& 0xff00) >> 8);
800 seq_printf(m
, "Render p-state VID: %d\n",
801 gt_perf_status
& 0xff);
802 seq_printf(m
, "Render p-state limit: %d\n",
803 rp_state_limits
& 0xff);
804 seq_printf(m
, "CAGF: %dMHz\n", cagf
);
805 seq_printf(m
, "RP CUR UP EI: %dus\n", rpupei
&
807 seq_printf(m
, "RP CUR UP: %dus\n", rpcurup
&
808 GEN6_CURBSYTAVG_MASK
);
809 seq_printf(m
, "RP PREV UP: %dus\n", rpprevup
&
810 GEN6_CURBSYTAVG_MASK
);
811 seq_printf(m
, "RP CUR DOWN EI: %dus\n", rpdownei
&
813 seq_printf(m
, "RP CUR DOWN: %dus\n", rpcurdown
&
814 GEN6_CURBSYTAVG_MASK
);
815 seq_printf(m
, "RP PREV DOWN: %dus\n", rpprevdown
&
816 GEN6_CURBSYTAVG_MASK
);
818 max_freq
= (rp_state_cap
& 0xff0000) >> 16;
819 seq_printf(m
, "Lowest (RPN) frequency: %dMHz\n",
820 max_freq
* GT_FREQUENCY_MULTIPLIER
);
822 max_freq
= (rp_state_cap
& 0xff00) >> 8;
823 seq_printf(m
, "Nominal (RP1) frequency: %dMHz\n",
824 max_freq
* GT_FREQUENCY_MULTIPLIER
);
826 max_freq
= rp_state_cap
& 0xff;
827 seq_printf(m
, "Max non-overclocked (RP0) frequency: %dMHz\n",
828 max_freq
* GT_FREQUENCY_MULTIPLIER
);
830 seq_printf(m
, "Max overclocked frequency: %dMHz\n",
831 dev_priv
->rps
.hw_max
* GT_FREQUENCY_MULTIPLIER
);
832 } else if (IS_VALLEYVIEW(dev
)) {
835 mutex_lock(&dev_priv
->rps
.hw_lock
);
836 freq_sts
= vlv_punit_read(dev_priv
, PUNIT_REG_GPU_FREQ_STS
);
837 seq_printf(m
, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts
);
838 seq_printf(m
, "DDR freq: %d MHz\n", dev_priv
->mem_freq
);
840 val
= vlv_punit_read(dev_priv
, PUNIT_FUSE_BUS1
);
841 seq_printf(m
, "max GPU freq: %d MHz\n",
842 vlv_gpu_freq(dev_priv
->mem_freq
, val
));
844 val
= vlv_punit_read(dev_priv
, PUNIT_REG_GPU_LFM
);
845 seq_printf(m
, "min GPU freq: %d MHz\n",
846 vlv_gpu_freq(dev_priv
->mem_freq
, val
));
848 seq_printf(m
, "current GPU freq: %d MHz\n",
849 vlv_gpu_freq(dev_priv
->mem_freq
,
850 (freq_sts
>> 8) & 0xff));
851 mutex_unlock(&dev_priv
->rps
.hw_lock
);
853 seq_puts(m
, "no P-state info available\n");
859 static int i915_delayfreq_table(struct seq_file
*m
, void *unused
)
861 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
862 struct drm_device
*dev
= node
->minor
->dev
;
863 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
867 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
871 for (i
= 0; i
< 16; i
++) {
872 delayfreq
= I915_READ(PXVFREQ_BASE
+ i
* 4);
873 seq_printf(m
, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i
, delayfreq
,
874 (delayfreq
& PXVFREQ_PX_MASK
) >> PXVFREQ_PX_SHIFT
);
877 mutex_unlock(&dev
->struct_mutex
);
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}
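/*
 * Worked example of the 25mV-per-step decoding above: MAP_TO_MV(0) is
 * 1250mV, MAP_TO_MV(10) is 1000mV and MAP_TO_MV(50) is 0mV.
 */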
887 static int i915_inttoext_table(struct seq_file
*m
, void *unused
)
889 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
890 struct drm_device
*dev
= node
->minor
->dev
;
891 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
895 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
899 for (i
= 1; i
<= 32; i
++) {
900 inttoext
= I915_READ(INTTOEXT_BASE_ILK
+ i
* 4);
901 seq_printf(m
, "INTTOEXT%02d: 0x%08x\n", i
, inttoext
);
904 mutex_unlock(&dev
->struct_mutex
);
909 static int ironlake_drpc_info(struct seq_file
*m
)
911 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
912 struct drm_device
*dev
= node
->minor
->dev
;
913 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
914 u32 rgvmodectl
, rstdbyctl
;
918 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
922 rgvmodectl
= I915_READ(MEMMODECTL
);
923 rstdbyctl
= I915_READ(RSTDBYCTL
);
924 crstandvid
= I915_READ16(CRSTANDVID
);
926 mutex_unlock(&dev
->struct_mutex
);
928 seq_printf(m
, "HD boost: %s\n", (rgvmodectl
& MEMMODE_BOOST_EN
) ?
930 seq_printf(m
, "Boost freq: %d\n",
931 (rgvmodectl
& MEMMODE_BOOST_FREQ_MASK
) >>
932 MEMMODE_BOOST_FREQ_SHIFT
);
933 seq_printf(m
, "HW control enabled: %s\n",
934 rgvmodectl
& MEMMODE_HWIDLE_EN
? "yes" : "no");
935 seq_printf(m
, "SW control enabled: %s\n",
936 rgvmodectl
& MEMMODE_SWMODE_EN
? "yes" : "no");
937 seq_printf(m
, "Gated voltage change: %s\n",
938 rgvmodectl
& MEMMODE_RCLK_GATE
? "yes" : "no");
939 seq_printf(m
, "Starting frequency: P%d\n",
940 (rgvmodectl
& MEMMODE_FSTART_MASK
) >> MEMMODE_FSTART_SHIFT
);
941 seq_printf(m
, "Max P-state: P%d\n",
942 (rgvmodectl
& MEMMODE_FMAX_MASK
) >> MEMMODE_FMAX_SHIFT
);
943 seq_printf(m
, "Min P-state: P%d\n", (rgvmodectl
& MEMMODE_FMIN_MASK
));
944 seq_printf(m
, "RS1 VID: %d\n", (crstandvid
& 0x3f));
945 seq_printf(m
, "RS2 VID: %d\n", ((crstandvid
>> 8) & 0x3f));
946 seq_printf(m
, "Render standby enabled: %s\n",
947 (rstdbyctl
& RCX_SW_EXIT
) ? "no" : "yes");
948 seq_puts(m
, "Current RS state: ");
949 switch (rstdbyctl
& RSX_STATUS_MASK
) {
954 seq_puts(m
, "RC1\n");
956 case RSX_STATUS_RC1E
:
957 seq_puts(m
, "RC1E\n");
960 seq_puts(m
, "RS1\n");
963 seq_puts(m
, "RS2 (RC6)\n");
966 seq_puts(m
, "RC3 (RC6+)\n");
969 seq_puts(m
, "unknown\n");
976 static int gen6_drpc_info(struct seq_file
*m
)
979 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
980 struct drm_device
*dev
= node
->minor
->dev
;
981 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
982 u32 rpmodectl1
, gt_core_status
, rcctl1
, rc6vids
= 0;
983 unsigned forcewake_count
;
986 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
990 spin_lock_irq(&dev_priv
->gt_lock
);
991 forcewake_count
= dev_priv
->forcewake_count
;
992 spin_unlock_irq(&dev_priv
->gt_lock
);
994 if (forcewake_count
) {
995 seq_puts(m
, "RC information inaccurate because somebody "
996 "holds a forcewake reference \n");
998 /* NB: we cannot use forcewake, else we read the wrong values */
999 while (count
++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK
) & 1))
1001 seq_printf(m
, "RC information accurate: %s\n", yesno(count
< 51));
1004 gt_core_status
= readl(dev_priv
->regs
+ GEN6_GT_CORE_STATUS
);
1005 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS
, gt_core_status
, 4);
1007 rpmodectl1
= I915_READ(GEN6_RP_CONTROL
);
1008 rcctl1
= I915_READ(GEN6_RC_CONTROL
);
1009 mutex_unlock(&dev
->struct_mutex
);
1010 mutex_lock(&dev_priv
->rps
.hw_lock
);
1011 sandybridge_pcode_read(dev_priv
, GEN6_PCODE_READ_RC6VIDS
, &rc6vids
);
1012 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1014 seq_printf(m
, "Video Turbo Mode: %s\n",
1015 yesno(rpmodectl1
& GEN6_RP_MEDIA_TURBO
));
1016 seq_printf(m
, "HW control enabled: %s\n",
1017 yesno(rpmodectl1
& GEN6_RP_ENABLE
));
1018 seq_printf(m
, "SW control enabled: %s\n",
1019 yesno((rpmodectl1
& GEN6_RP_MEDIA_MODE_MASK
) ==
1020 GEN6_RP_MEDIA_SW_MODE
));
1021 seq_printf(m
, "RC1e Enabled: %s\n",
1022 yesno(rcctl1
& GEN6_RC_CTL_RC1e_ENABLE
));
1023 seq_printf(m
, "RC6 Enabled: %s\n",
1024 yesno(rcctl1
& GEN6_RC_CTL_RC6_ENABLE
));
1025 seq_printf(m
, "Deep RC6 Enabled: %s\n",
1026 yesno(rcctl1
& GEN6_RC_CTL_RC6p_ENABLE
));
1027 seq_printf(m
, "Deepest RC6 Enabled: %s\n",
1028 yesno(rcctl1
& GEN6_RC_CTL_RC6pp_ENABLE
));
1029 seq_puts(m
, "Current RC state: ");
1030 switch (gt_core_status
& GEN6_RCn_MASK
) {
1032 if (gt_core_status
& GEN6_CORE_CPD_STATE_MASK
)
1033 seq_puts(m
, "Core Power Down\n");
1035 seq_puts(m
, "on\n");
1038 seq_puts(m
, "RC3\n");
1041 seq_puts(m
, "RC6\n");
1044 seq_puts(m
, "RC7\n");
1047 seq_puts(m
, "Unknown\n");
1051 seq_printf(m
, "Core Power Down: %s\n",
1052 yesno(gt_core_status
& GEN6_CORE_CPD_STATE_MASK
));
1054 /* Not exactly sure what this is */
1055 seq_printf(m
, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1056 I915_READ(GEN6_GT_GFX_RC6_LOCKED
));
1057 seq_printf(m
, "RC6 residency since boot: %u\n",
1058 I915_READ(GEN6_GT_GFX_RC6
));
1059 seq_printf(m
, "RC6+ residency since boot: %u\n",
1060 I915_READ(GEN6_GT_GFX_RC6p
));
1061 seq_printf(m
, "RC6++ residency since boot: %u\n",
1062 I915_READ(GEN6_GT_GFX_RC6pp
));
1064 seq_printf(m
, "RC6 voltage: %dmV\n",
1065 GEN6_DECODE_RC6_VID(((rc6vids
>> 0) & 0xff)));
1066 seq_printf(m
, "RC6+ voltage: %dmV\n",
1067 GEN6_DECODE_RC6_VID(((rc6vids
>> 8) & 0xff)));
1068 seq_printf(m
, "RC6++ voltage: %dmV\n",
1069 GEN6_DECODE_RC6_VID(((rc6vids
>> 16) & 0xff)));
1073 static int i915_drpc_info(struct seq_file
*m
, void *unused
)
1075 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1076 struct drm_device
*dev
= node
->minor
->dev
;
1078 if (IS_GEN6(dev
) || IS_GEN7(dev
))
1079 return gen6_drpc_info(m
);
1081 return ironlake_drpc_info(m
);
1084 static int i915_fbc_status(struct seq_file
*m
, void *unused
)
1086 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1087 struct drm_device
*dev
= node
->minor
->dev
;
1088 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1090 if (!I915_HAS_FBC(dev
)) {
1091 seq_puts(m
, "FBC unsupported on this chipset\n");
1095 if (intel_fbc_enabled(dev
)) {
1096 seq_puts(m
, "FBC enabled\n");
1098 seq_puts(m
, "FBC disabled: ");
1099 switch (dev_priv
->fbc
.no_fbc_reason
) {
1101 seq_puts(m
, "no outputs");
1103 case FBC_STOLEN_TOO_SMALL
:
1104 seq_puts(m
, "not enough stolen memory");
1106 case FBC_UNSUPPORTED_MODE
:
1107 seq_puts(m
, "mode not supported");
1109 case FBC_MODE_TOO_LARGE
:
1110 seq_puts(m
, "mode too large");
1113 seq_puts(m
, "FBC unsupported on plane");
1116 seq_puts(m
, "scanout buffer not tiled");
1118 case FBC_MULTIPLE_PIPES
:
1119 seq_puts(m
, "multiple pipes are enabled");
1121 case FBC_MODULE_PARAM
:
1122 seq_puts(m
, "disabled per module param (default off)");
1124 case FBC_CHIP_DEFAULT
:
1125 seq_puts(m
, "disabled per chip default");
1128 seq_puts(m
, "unknown reason");
1135 static int i915_ips_status(struct seq_file
*m
, void *unused
)
1137 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1138 struct drm_device
*dev
= node
->minor
->dev
;
1139 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1141 if (!HAS_IPS(dev
)) {
1142 seq_puts(m
, "not supported\n");
1146 if (I915_READ(IPS_CTL
) & IPS_ENABLE
)
1147 seq_puts(m
, "enabled\n");
1149 seq_puts(m
, "disabled\n");
1154 static int i915_sr_status(struct seq_file
*m
, void *unused
)
1156 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1157 struct drm_device
*dev
= node
->minor
->dev
;
1158 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1159 bool sr_enabled
= false;
1161 if (HAS_PCH_SPLIT(dev
))
1162 sr_enabled
= I915_READ(WM1_LP_ILK
) & WM1_LP_SR_EN
;
1163 else if (IS_CRESTLINE(dev
) || IS_I945G(dev
) || IS_I945GM(dev
))
1164 sr_enabled
= I915_READ(FW_BLC_SELF
) & FW_BLC_SELF_EN
;
1165 else if (IS_I915GM(dev
))
1166 sr_enabled
= I915_READ(INSTPM
) & INSTPM_SELF_EN
;
1167 else if (IS_PINEVIEW(dev
))
1168 sr_enabled
= I915_READ(DSPFW3
) & PINEVIEW_SELF_REFRESH_EN
;
1170 seq_printf(m
, "self-refresh: %s\n",
1171 sr_enabled
? "enabled" : "disabled");
1176 static int i915_emon_status(struct seq_file
*m
, void *unused
)
1178 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1179 struct drm_device
*dev
= node
->minor
->dev
;
1180 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1181 unsigned long temp
, chipset
, gfx
;
1187 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1191 temp
= i915_mch_val(dev_priv
);
1192 chipset
= i915_chipset_val(dev_priv
);
1193 gfx
= i915_gfx_val(dev_priv
);
1194 mutex_unlock(&dev
->struct_mutex
);
1196 seq_printf(m
, "GMCH temp: %ld\n", temp
);
1197 seq_printf(m
, "Chipset power: %ld\n", chipset
);
1198 seq_printf(m
, "GFX power: %ld\n", gfx
);
1199 seq_printf(m
, "Total power: %ld\n", chipset
+ gfx
);
1204 static int i915_ring_freq_table(struct seq_file
*m
, void *unused
)
1206 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1207 struct drm_device
*dev
= node
->minor
->dev
;
1208 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1210 int gpu_freq
, ia_freq
;
1212 if (!(IS_GEN6(dev
) || IS_GEN7(dev
))) {
1213 seq_puts(m
, "unsupported on this chipset\n");
1217 ret
= mutex_lock_interruptible(&dev_priv
->rps
.hw_lock
);
1221 seq_puts(m
, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1223 for (gpu_freq
= dev_priv
->rps
.min_delay
;
1224 gpu_freq
<= dev_priv
->rps
.max_delay
;
1227 sandybridge_pcode_read(dev_priv
,
1228 GEN6_PCODE_READ_MIN_FREQ_TABLE
,
1230 seq_printf(m
, "%d\t\t%d\t\t\t\t%d\n",
1231 gpu_freq
* GT_FREQUENCY_MULTIPLIER
,
1232 ((ia_freq
>> 0) & 0xff) * 100,
1233 ((ia_freq
>> 8) & 0xff) * 100);
1236 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1241 static int i915_gfxec(struct seq_file
*m
, void *unused
)
1243 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1244 struct drm_device
*dev
= node
->minor
->dev
;
1245 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1248 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1252 seq_printf(m
, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1254 mutex_unlock(&dev
->struct_mutex
);
1259 static int i915_opregion(struct seq_file
*m
, void *unused
)
1261 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1262 struct drm_device
*dev
= node
->minor
->dev
;
1263 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1264 struct intel_opregion
*opregion
= &dev_priv
->opregion
;
1265 void *data
= kmalloc(OPREGION_SIZE
, GFP_KERNEL
);
1271 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1275 if (opregion
->header
) {
1276 memcpy_fromio(data
, opregion
->header
, OPREGION_SIZE
);
1277 seq_write(m
, data
, OPREGION_SIZE
);
1280 mutex_unlock(&dev
->struct_mutex
);
1287 static int i915_gem_framebuffer_info(struct seq_file
*m
, void *data
)
1289 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1290 struct drm_device
*dev
= node
->minor
->dev
;
1291 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1292 struct intel_fbdev
*ifbdev
;
1293 struct intel_framebuffer
*fb
;
1296 ret
= mutex_lock_interruptible(&dev
->mode_config
.mutex
);
1300 ifbdev
= dev_priv
->fbdev
;
1301 fb
= to_intel_framebuffer(ifbdev
->helper
.fb
);
1303 seq_printf(m
, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1307 fb
->base
.bits_per_pixel
,
1308 atomic_read(&fb
->base
.refcount
.refcount
));
1309 describe_obj(m
, fb
->obj
);
1311 mutex_unlock(&dev
->mode_config
.mutex
);
1313 mutex_lock(&dev
->mode_config
.fb_lock
);
1314 list_for_each_entry(fb
, &dev
->mode_config
.fb_list
, base
.head
) {
1315 if (&fb
->base
== ifbdev
->helper
.fb
)
1318 seq_printf(m
, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1322 fb
->base
.bits_per_pixel
,
1323 atomic_read(&fb
->base
.refcount
.refcount
));
1324 describe_obj(m
, fb
->obj
);
1327 mutex_unlock(&dev
->mode_config
.fb_lock
);
1332 static int i915_context_status(struct seq_file
*m
, void *unused
)
1334 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1335 struct drm_device
*dev
= node
->minor
->dev
;
1336 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1337 struct intel_ring_buffer
*ring
;
1340 ret
= mutex_lock_interruptible(&dev
->mode_config
.mutex
);
1344 if (dev_priv
->ips
.pwrctx
) {
1345 seq_puts(m
, "power context ");
1346 describe_obj(m
, dev_priv
->ips
.pwrctx
);
1350 if (dev_priv
->ips
.renderctx
) {
1351 seq_puts(m
, "render context ");
1352 describe_obj(m
, dev_priv
->ips
.renderctx
);
1356 for_each_ring(ring
, dev_priv
, i
) {
1357 if (ring
->default_context
) {
1358 seq_printf(m
, "HW default context %s ring ", ring
->name
);
1359 describe_obj(m
, ring
->default_context
->obj
);
1364 mutex_unlock(&dev
->mode_config
.mutex
);
1369 static int i915_gen6_forcewake_count_info(struct seq_file
*m
, void *data
)
1371 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1372 struct drm_device
*dev
= node
->minor
->dev
;
1373 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1374 unsigned forcewake_count
;
1376 spin_lock_irq(&dev_priv
->gt_lock
);
1377 forcewake_count
= dev_priv
->forcewake_count
;
1378 spin_unlock_irq(&dev_priv
->gt_lock
);
1380 seq_printf(m
, "forcewake count = %u\n", forcewake_count
);
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
1409 static int i915_swizzle_info(struct seq_file
*m
, void *data
)
1411 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1412 struct drm_device
*dev
= node
->minor
->dev
;
1413 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1416 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1420 seq_printf(m
, "bit6 swizzle for X-tiling = %s\n",
1421 swizzle_string(dev_priv
->mm
.bit_6_swizzle_x
));
1422 seq_printf(m
, "bit6 swizzle for Y-tiling = %s\n",
1423 swizzle_string(dev_priv
->mm
.bit_6_swizzle_y
));
1425 if (IS_GEN3(dev
) || IS_GEN4(dev
)) {
1426 seq_printf(m
, "DDC = 0x%08x\n",
1428 seq_printf(m
, "C0DRB3 = 0x%04x\n",
1429 I915_READ16(C0DRB3
));
1430 seq_printf(m
, "C1DRB3 = 0x%04x\n",
1431 I915_READ16(C1DRB3
));
1432 } else if (IS_GEN6(dev
) || IS_GEN7(dev
)) {
1433 seq_printf(m
, "MAD_DIMM_C0 = 0x%08x\n",
1434 I915_READ(MAD_DIMM_C0
));
1435 seq_printf(m
, "MAD_DIMM_C1 = 0x%08x\n",
1436 I915_READ(MAD_DIMM_C1
));
1437 seq_printf(m
, "MAD_DIMM_C2 = 0x%08x\n",
1438 I915_READ(MAD_DIMM_C2
));
1439 seq_printf(m
, "TILECTL = 0x%08x\n",
1440 I915_READ(TILECTL
));
1441 seq_printf(m
, "ARB_MODE = 0x%08x\n",
1442 I915_READ(ARB_MODE
));
1443 seq_printf(m
, "DISP_ARB_CTL = 0x%08x\n",
1444 I915_READ(DISP_ARB_CTL
));
1446 mutex_unlock(&dev
->struct_mutex
);
1451 static int i915_ppgtt_info(struct seq_file
*m
, void *data
)
1453 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1454 struct drm_device
*dev
= node
->minor
->dev
;
1455 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1456 struct intel_ring_buffer
*ring
;
1460 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1463 if (INTEL_INFO(dev
)->gen
== 6)
1464 seq_printf(m
, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE
));
1466 for_each_ring(ring
, dev_priv
, i
) {
1467 seq_printf(m
, "%s\n", ring
->name
);
1468 if (INTEL_INFO(dev
)->gen
== 7)
1469 seq_printf(m
, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring
)));
1470 seq_printf(m
, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring
)));
1471 seq_printf(m
, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring
)));
1472 seq_printf(m
, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring
)));
1474 if (dev_priv
->mm
.aliasing_ppgtt
) {
1475 struct i915_hw_ppgtt
*ppgtt
= dev_priv
->mm
.aliasing_ppgtt
;
1477 seq_puts(m
, "aliasing PPGTT:\n");
1478 seq_printf(m
, "pd gtt offset: 0x%08x\n", ppgtt
->pd_offset
);
1480 seq_printf(m
, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK
));
1481 mutex_unlock(&dev
->struct_mutex
);
1486 static int i915_dpio_info(struct seq_file
*m
, void *data
)
1488 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1489 struct drm_device
*dev
= node
->minor
->dev
;
1490 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1494 if (!IS_VALLEYVIEW(dev
)) {
1495 seq_puts(m
, "unsupported\n");
1499 ret
= mutex_lock_interruptible(&dev_priv
->dpio_lock
);
1503 seq_printf(m
, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL
));
1505 seq_printf(m
, "DPIO_DIV_A: 0x%08x\n",
1506 vlv_dpio_read(dev_priv
, _DPIO_DIV_A
));
1507 seq_printf(m
, "DPIO_DIV_B: 0x%08x\n",
1508 vlv_dpio_read(dev_priv
, _DPIO_DIV_B
));
1510 seq_printf(m
, "DPIO_REFSFR_A: 0x%08x\n",
1511 vlv_dpio_read(dev_priv
, _DPIO_REFSFR_A
));
1512 seq_printf(m
, "DPIO_REFSFR_B: 0x%08x\n",
1513 vlv_dpio_read(dev_priv
, _DPIO_REFSFR_B
));
1515 seq_printf(m
, "DPIO_CORE_CLK_A: 0x%08x\n",
1516 vlv_dpio_read(dev_priv
, _DPIO_CORE_CLK_A
));
1517 seq_printf(m
, "DPIO_CORE_CLK_B: 0x%08x\n",
1518 vlv_dpio_read(dev_priv
, _DPIO_CORE_CLK_B
));
1520 seq_printf(m
, "DPIO_LPF_COEFF_A: 0x%08x\n",
1521 vlv_dpio_read(dev_priv
, _DPIO_LPF_COEFF_A
));
1522 seq_printf(m
, "DPIO_LPF_COEFF_B: 0x%08x\n",
1523 vlv_dpio_read(dev_priv
, _DPIO_LPF_COEFF_B
));
1525 seq_printf(m
, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1526 vlv_dpio_read(dev_priv
, DPIO_FASTCLK_DISABLE
));
1528 mutex_unlock(&dev_priv
->dpio_lock
);
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
1573 i915_ring_stop_get(void *data
, u64
*val
)
1575 struct drm_device
*dev
= data
;
1576 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1578 *val
= dev_priv
->gpu_error
.stop_rings
;
1584 i915_ring_stop_set(void *data
, u64 val
)
1586 struct drm_device
*dev
= data
;
1587 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1590 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val
);
1592 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1596 dev_priv
->gpu_error
.stop_rings
= val
;
1597 mutex_unlock(&dev
->struct_mutex
);
1602 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops
,
1603 i915_ring_stop_get
, i915_ring_stop_set
,
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
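/*
 * i915_gem_drop_caches takes a bitmask of the flags above: DROP_ACTIVE
 * waits for the GPU to idle first, DROP_RETIRE retires completed
 * requests, DROP_BOUND unbinds unpinned inactive objects and
 * DROP_UNBOUND drops the backing pages of unbound objects, so writing
 * DROP_ALL (0xf) sheds as much as possible.
 */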
1615 i915_drop_caches_get(void *data
, u64
*val
)
1623 i915_drop_caches_set(void *data
, u64 val
)
1625 struct drm_device
*dev
= data
;
1626 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1627 struct drm_i915_gem_object
*obj
, *next
;
1630 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val
);
1632 /* No need to check and wait for gpu resets, only libdrm auto-restarts
1633 * on ioctls on -EAGAIN. */
1634 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1638 if (val
& DROP_ACTIVE
) {
1639 ret
= i915_gpu_idle(dev
);
1644 if (val
& (DROP_RETIRE
| DROP_ACTIVE
))
1645 i915_gem_retire_requests(dev
);
1647 if (val
& DROP_BOUND
) {
1648 list_for_each_entry_safe(obj
, next
, &dev_priv
->mm
.inactive_list
, mm_list
)
1649 if (obj
->pin_count
== 0) {
1650 ret
= i915_gem_object_unbind(obj
);
1656 if (val
& DROP_UNBOUND
) {
1657 list_for_each_entry_safe(obj
, next
, &dev_priv
->mm
.unbound_list
,
1659 if (obj
->pages_pin_count
== 0) {
1660 ret
= i915_gem_object_put_pages(obj
);
1667 mutex_unlock(&dev
->struct_mutex
);
1672 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops
,
1673 i915_drop_caches_get
, i915_drop_caches_set
,
1677 i915_max_freq_get(void *data
, u64
*val
)
1679 struct drm_device
*dev
= data
;
1680 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1683 if (!(IS_GEN6(dev
) || IS_GEN7(dev
)))
1686 ret
= mutex_lock_interruptible(&dev_priv
->rps
.hw_lock
);
1690 if (IS_VALLEYVIEW(dev
))
1691 *val
= vlv_gpu_freq(dev_priv
->mem_freq
,
1692 dev_priv
->rps
.max_delay
);
1694 *val
= dev_priv
->rps
.max_delay
* GT_FREQUENCY_MULTIPLIER
;
1695 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1701 i915_max_freq_set(void *data
, u64 val
)
1703 struct drm_device
*dev
= data
;
1704 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1707 if (!(IS_GEN6(dev
) || IS_GEN7(dev
)))
1710 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val
);
1712 ret
= mutex_lock_interruptible(&dev_priv
->rps
.hw_lock
);
1717 * Turbo will still be enabled, but won't go above the set value.
1719 if (IS_VALLEYVIEW(dev
)) {
1720 val
= vlv_freq_opcode(dev_priv
->mem_freq
, val
);
1721 dev_priv
->rps
.max_delay
= val
;
1722 gen6_set_rps(dev
, val
);
1724 do_div(val
, GT_FREQUENCY_MULTIPLIER
);
1725 dev_priv
->rps
.max_delay
= val
;
1726 gen6_set_rps(dev
, val
);
1729 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1734 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops
,
1735 i915_max_freq_get
, i915_max_freq_set
,
1739 i915_min_freq_get(void *data
, u64
*val
)
1741 struct drm_device
*dev
= data
;
1742 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1745 if (!(IS_GEN6(dev
) || IS_GEN7(dev
)))
1748 ret
= mutex_lock_interruptible(&dev_priv
->rps
.hw_lock
);
1752 if (IS_VALLEYVIEW(dev
))
1753 *val
= vlv_gpu_freq(dev_priv
->mem_freq
,
1754 dev_priv
->rps
.min_delay
);
1756 *val
= dev_priv
->rps
.min_delay
* GT_FREQUENCY_MULTIPLIER
;
1757 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1763 i915_min_freq_set(void *data
, u64 val
)
1765 struct drm_device
*dev
= data
;
1766 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1769 if (!(IS_GEN6(dev
) || IS_GEN7(dev
)))
1772 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val
);
1774 ret
= mutex_lock_interruptible(&dev_priv
->rps
.hw_lock
);
1779 * Turbo will still be enabled, but won't go below the set value.
1781 if (IS_VALLEYVIEW(dev
)) {
1782 val
= vlv_freq_opcode(dev_priv
->mem_freq
, val
);
1783 dev_priv
->rps
.min_delay
= val
;
1784 valleyview_set_rps(dev
, val
);
1786 do_div(val
, GT_FREQUENCY_MULTIPLIER
);
1787 dev_priv
->rps
.min_delay
= val
;
1788 gen6_set_rps(dev
, val
);
1790 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1795 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops
,
1796 i915_min_freq_get
, i915_min_freq_set
,
1800 i915_cache_sharing_get(void *data
, u64
*val
)
1802 struct drm_device
*dev
= data
;
1803 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1807 if (!(IS_GEN6(dev
) || IS_GEN7(dev
)))
1810 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1814 snpcr
= I915_READ(GEN6_MBCUNIT_SNPCR
);
1815 mutex_unlock(&dev_priv
->dev
->struct_mutex
);
1817 *val
= (snpcr
& GEN6_MBC_SNPCR_MASK
) >> GEN6_MBC_SNPCR_SHIFT
;
1823 i915_cache_sharing_set(void *data
, u64 val
)
1825 struct drm_device
*dev
= data
;
1826 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1829 if (!(IS_GEN6(dev
) || IS_GEN7(dev
)))
1835 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val
);
1837 /* Update the cache sharing policy here as well */
1838 snpcr
= I915_READ(GEN6_MBCUNIT_SNPCR
);
1839 snpcr
&= ~GEN6_MBC_SNPCR_MASK
;
1840 snpcr
|= (val
<< GEN6_MBC_SNPCR_SHIFT
);
1841 I915_WRITE(GEN6_MBCUNIT_SNPCR
, snpcr
);
1846 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops
,
1847 i915_cache_sharing_get
, i915_cache_sharing_set
,
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
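/*
 * Holding i915_forcewake_user open keeps a gen6+ forcewake reference, so
 * the GT cannot power down its registers while they are being inspected.
 * Illustrative use from a shell (path depends on the debugfs mount):
 *
 *   # exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *   ... read the registers of interest ...
 *   # exec 3<&-
 */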
1908 static int i915_forcewake_create(struct dentry
*root
, struct drm_minor
*minor
)
1910 struct drm_device
*dev
= minor
->dev
;
1913 ent
= debugfs_create_file("i915_forcewake_user",
1916 &i915_forcewake_fops
);
1918 return PTR_ERR(ent
);
1920 return drm_add_fake_info_node(minor
, ent
, &i915_forcewake_fops
);
1923 static int i915_debugfs_create(struct dentry
*root
,
1924 struct drm_minor
*minor
,
1926 const struct file_operations
*fops
)
1928 struct drm_device
*dev
= minor
->dev
;
1931 ent
= debugfs_create_file(name
,
1936 return PTR_ERR(ent
);
1938 return drm_add_fake_info_node(minor
, ent
, fops
);
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
};
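/*
 * Two tables drive registration: i915_debugfs_list holds the read-only
 * seq_file nodes handed to drm_debugfs_create_files(), while
 * i915_debugfs_files lists the writable control files that carry their
 * own file_operations and are created one by one in i915_debugfs_init().
 */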
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

#endif /* CONFIG_DEBUG_FS */