/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}
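/*
 * Note (added comment): list_sort() only cares about the sign of the
 * comparator's return value. Subtracting the stolen start offsets is fine
 * here because stolen memory is small enough in practice that the
 * difference cannot overflow int.
 */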
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};
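/*
 * Note (added comment): count_objects() above and count_vmas() below are
 * statement macros, not functions; they deliberately rely on variables
 * (obj/vma, size, count, mappable_size, mappable_count) declared in the
 * calling function.
 */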
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
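/*
 * Note (added comment): the interrupt dump below walks the standard
 * three-register interrupt block used throughout the chip: IMR (mask),
 * IIR (identity, i.e. which interrupts have fired) and IER (enable).
 */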
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(i) {
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IMR(i)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IIR(i)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IER(i)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
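/*
 * Usage sketch (added comment, assuming debugfs is mounted at the usual
 * /sys/kernel/debug and this is DRM minor 0):
 *   cat /sys/kernel/debug/dri/0/i915_error_state     # dump last GPU hang
 *   echo 1 > /sys/kernel/debug/dri/0/i915_error_state  # clear the state
 */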
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
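/*
 * Note (added comment): i915_next_seqno is a read/write debugfs file;
 * reading reports the next seqno to be assigned and writing calls
 * i915_gem_set_seqno() to move the counter, which is mainly useful for
 * exercising seqno wraparound in testing.
 */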
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	crstanddelay = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n",
		   (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = valleyview_rps_max_freq(dev_priv);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		val = valleyview_rps_min_freq(dev_priv);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
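/*
 * Note (added comment): the RPNSWREQ/CAGF/RP0/RP1/RPN values above are
 * stored by the hardware as ratios; GT_FREQUENCY_MULTIPLIER converts a
 * ratio step to MHz (50 on these parts).
 */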
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1;
	unsigned fw_rendercount = 0, fw_mediacount = 0;

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
			VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
			VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	spin_lock_irq(&dev_priv->uncore.lock);
	fw_rendercount = dev_priv->uncore.fw_rendercount;
	fw_mediacount = dev_priv->uncore.fw_mediacount;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);

	return 0;
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
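/*
 * Note (added comment): the RC6/RC6+/RC6++ residency counters above
 * accumulate while the render core sits in the respective power state.
 * They are raw hardware counts whose tick period is generation dependent
 * (commonly about 1.28us), so consumers should treat them as relative
 * values rather than absolute time.
 */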
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}
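/*
 * Note (added comment): GEN6_PCODE_READ_MIN_FREQ_TABLE returns a packed
 * value: bits 7:0 hold the effective CPU frequency and bits 15:8 the
 * effective ring frequency, both in units of 100MHz, which is why the
 * two fields above are multiplied by 100.
 */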
static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ", ring->name);

		describe_obj(m, ctx->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;

	spin_lock_irq(&dev_priv->uncore.lock);
	if (IS_VALLEYVIEW(dev)) {
		fw_rendercount = dev_priv->uncore.fw_rendercount;
		fw_mediacount = dev_priv->uncore.fw_mediacount;
	} else
		forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
	} else
		seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
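/*
 * Background (added comment): on parts where the memory controller
 * interleaves channels using address bit 6, the GTT "swizzles" object
 * addresses (XORing in bits such as 9, 10, 11 or 17, as reported above),
 * and userspace must apply the same swizzle when accessing tiled buffers
 * through the CPU.
 */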
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (IS_GEN8(dev))
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_hw_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);

	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			/* Print each PDP once; the original nested loop
			 * reused 'i' and repeated the same value. */
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_hw_ppgtt *pvt_ppgtt;

		pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
		seq_printf(m, "proc: %s\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
		seq_puts(m, "  default context:\n");
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_puts(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO PLL DW3 CH0: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
	seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));

	seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
	seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));

	seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
	seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));

	seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
	seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	bool enabled = false;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));

	enabled = HAS_PSR(dev) &&
		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "Enabled: %s\n", yesno(enabled));

	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
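/*
 * Note (added comment): the RAPL math above works as follows: bits 12:8
 * of MSR_RAPL_POWER_UNIT give an exponent N such that one hardware energy
 * tick is 1/(2^N) joules, so 1000000 / (1 << N) converts the
 * MCH_SECP_NRG_STTS count into microjoules.
 */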
static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	mutex_lock(&dev_priv->pc8.lock);
	seq_printf(m, "Requirements met: %s\n",
		   yesno(dev_priv->pc8.requirements_met));
	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(dev_priv->pc8.irqs_disabled));
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
	mutex_unlock(&dev_priv->pc8.lock);

	return 0;
}
static const char *power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
		WARN_ON(1);
		return "?";
	}
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
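/*
 * Note (added comment): CIRC_CNT(head, tail, size) from <linux/circ_buf.h>
 * returns the number of filled slots in a power-of-two ring buffer; the
 * reader below consumes entries from tail while the CRC interrupt handler
 * produces them at head.
 */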
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int head, tail, n_entries, n;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	head = pipe_crc->head;
	tail = pipe_crc->tail;
	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
			count / PIPE_CRC_LINE_LEN);
	spin_unlock_irq(&pipe_crc->lock);

	bytes_read = 0;
	n = 0;
	do {
		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
		int ret;

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
				   buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		n++;
	} while (--n_entries);

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->tail = tail;
	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
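
/*
 * Illustrative only, not part of the driver: a minimal userspace reader,
 * assuming debugfs is mounted at /sys/kernel/debug and this device is DRM
 * minor 0. Each read() yields whole PIPE_CRC_LINE_LEN-sized lines of the
 * form "<frame> <crc0> <crc1> <crc2> <crc3> <crc4>":
 *
 *	int fd = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc", O_RDONLY);
 *	char line[PIPE_CRC_LINE_LEN];
 *	while (read(fd, line, sizeof(line)) > 0)
 *		;	// parse frame counter and CRC values from 'line'
 *	close(fd);
 *
 * CRC generation must first be enabled through i915_display_crc_ctl (see
 * below); a second concurrent open() of the same pipe file returns -EBUSY.
 */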
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};
static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}
static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
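
/*
 * With "auto" the CRC source is derived from whatever encoder currently
 * drives the pipe: TV-out and DisplayPort get their dedicated tap points,
 * everything else falls back to the plain pipe CRC selected above.
 */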
static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}
static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}
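
/*
 * Both undo helpers clear only the scramble-reset bit of the pipe being
 * disabled and drop the DC-balance reset once no bit is left set in
 * PIPE_SCRAMBLE_RESET_MASK, so stopping CRC capture on one pipe never
 * disturbs a concurrent capture on the other.
 */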
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(&source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
					    INTEL_PIPE_CRC_ENTRIES_NR,
					    GFP_KERNEL);
		if (!pipe_crc->entries)
			return -ENOMEM;

		spin_lock_irq(&pipe_crc->lock);
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		intel_wait_for_vblank(dev, pipe);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
	}

	return 0;
}
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
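
/*
 * For example, the buffer "  pipe A none\n" tokenizes in place to the three
 * words { "pipe", "A", "none" }, with NUL bytes overwriting the separators.
 */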
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};
static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
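
/*
 * Illustrative usage from a shell, assuming debugfs is mounted at
 * /sys/kernel/debug and this device is DRM minor 0:
 *
 *	# echo "pipe A plane1" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *	# cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *	# echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *
 * Switching between two real sources requires going through "none" first,
 * as enforced by pipe_crc_set_source() above.
 */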
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
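
/*
 * Writing to i915_wedged feeds straight into i915_handle_error(), which is
 * how test suites provoke the GPU hang handling without real hardware
 * misbehaviour; reading it back reports the current reset counter.
 */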
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
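
/*
 * The DROP_* flags combine into the bitmask written to i915_gem_drop_caches;
 * writing DROP_ALL idles the GPU, retires outstanding requests and unbinds
 * or releases every unpinned buffer, which test suites use to start from a
 * known-empty GTT.
 */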
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);
		dev_priv->rps.max_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
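
/*
 * Note on units: userspace reads and writes these files in MHz, while
 * rps.max_delay/min_delay store hardware ratio steps, hence the
 * GT_FREQUENCY_MULTIPLIER scaling on Gen6/7 and the
 * vlv_gpu_freq()/vlv_freq_opcode() round trip on Valleyview.
 */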
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
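
/*
 * The SNPCR field masked by GEN6_MBC_SNPCR_MASK is only a couple of bits
 * wide, so the set path above rejects policy values above 3 before touching
 * the register.
 */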
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
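
/*
 * Holding i915_forcewake_user open keeps a forcewake and runtime-PM
 * reference for the lifetime of the file descriptor, so GT registers stay
 * readable from userspace instead of a powered-down device returning junk.
 */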
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
};
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}