2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
49 static const char *yesno(int v
)
51 return v
? "yes" : "no";
54 /* As the drm_debugfs_init() routines are called before dev->dev_private is
55 * allocated we need to hook into the minor for release. */
57 drm_add_fake_info_node(struct drm_minor
*minor
,
61 struct drm_info_node
*node
;
63 node
= kmalloc(sizeof(*node
), GFP_KERNEL
);
71 node
->info_ent
= (void *) key
;
73 mutex_lock(&minor
->debugfs_lock
);
74 list_add(&node
->list
, &minor
->debugfs_list
);
75 mutex_unlock(&minor
->debugfs_lock
);
80 static int i915_capabilities(struct seq_file
*m
, void *data
)
82 struct drm_info_node
*node
= m
->private;
83 struct drm_device
*dev
= node
->minor
->dev
;
84 const struct intel_device_info
*info
= INTEL_INFO(dev
);
86 seq_printf(m
, "gen: %d\n", info
->gen
);
87 seq_printf(m
, "pch: %d\n", INTEL_PCH_TYPE(dev
));
88 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
89 #define SEP_SEMICOLON ;
90 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG
, SEP_SEMICOLON
);
97 static const char *get_pin_flag(struct drm_i915_gem_object
*obj
)
99 if (obj
->user_pin_count
> 0)
101 else if (i915_gem_obj_is_pinned(obj
))
107 static const char *get_tiling_flag(struct drm_i915_gem_object
*obj
)
109 switch (obj
->tiling_mode
) {
111 case I915_TILING_NONE
: return " ";
112 case I915_TILING_X
: return "X";
113 case I915_TILING_Y
: return "Y";
117 static inline const char *get_global_flag(struct drm_i915_gem_object
*obj
)
119 return obj
->has_global_gtt_mapping
? "g" : " ";
123 describe_obj(struct seq_file
*m
, struct drm_i915_gem_object
*obj
)
125 struct i915_vma
*vma
;
128 seq_printf(m
, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
131 get_tiling_flag(obj
),
132 get_global_flag(obj
),
133 obj
->base
.size
/ 1024,
134 obj
->base
.read_domains
,
135 obj
->base
.write_domain
,
136 obj
->last_read_seqno
,
137 obj
->last_write_seqno
,
138 obj
->last_fenced_seqno
,
139 i915_cache_level_str(obj
->cache_level
),
140 obj
->dirty
? " dirty" : "",
141 obj
->madv
== I915_MADV_DONTNEED
? " purgeable" : "");
143 seq_printf(m
, " (name: %d)", obj
->base
.name
);
144 list_for_each_entry(vma
, &obj
->vma_list
, vma_link
)
145 if (vma
->pin_count
> 0)
147 seq_printf(m
, " (pinned x %d)", pin_count
);
148 if (obj
->pin_display
)
149 seq_printf(m
, " (display)");
150 if (obj
->fence_reg
!= I915_FENCE_REG_NONE
)
151 seq_printf(m
, " (fence: %d)", obj
->fence_reg
);
152 list_for_each_entry(vma
, &obj
->vma_list
, vma_link
) {
153 if (!i915_is_ggtt(vma
->vm
))
157 seq_printf(m
, "gtt offset: %08lx, size: %08lx)",
158 vma
->node
.start
, vma
->node
.size
);
161 seq_printf(m
, " (stolen: %08lx)", obj
->stolen
->start
);
162 if (obj
->pin_mappable
|| obj
->fault_mappable
) {
164 if (obj
->pin_mappable
)
166 if (obj
->fault_mappable
)
169 seq_printf(m
, " (%s mappable)", s
);
171 if (obj
->ring
!= NULL
)
172 seq_printf(m
, " (%s)", obj
->ring
->name
);
175 static void describe_ctx(struct seq_file
*m
, struct intel_context
*ctx
)
177 seq_putc(m
, ctx
->is_initialized
? 'I' : 'i');
178 seq_putc(m
, ctx
->remap_slice
? 'R' : 'r');
182 static int i915_gem_object_list_info(struct seq_file
*m
, void *data
)
184 struct drm_info_node
*node
= m
->private;
185 uintptr_t list
= (uintptr_t) node
->info_ent
->data
;
186 struct list_head
*head
;
187 struct drm_device
*dev
= node
->minor
->dev
;
188 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
189 struct i915_address_space
*vm
= &dev_priv
->gtt
.base
;
190 struct i915_vma
*vma
;
191 size_t total_obj_size
, total_gtt_size
;
194 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
198 /* FIXME: the user of this interface might want more than just GGTT */
201 seq_puts(m
, "Active:\n");
202 head
= &vm
->active_list
;
205 seq_puts(m
, "Inactive:\n");
206 head
= &vm
->inactive_list
;
209 mutex_unlock(&dev
->struct_mutex
);
213 total_obj_size
= total_gtt_size
= count
= 0;
214 list_for_each_entry(vma
, head
, mm_list
) {
216 describe_obj(m
, vma
->obj
);
218 total_obj_size
+= vma
->obj
->base
.size
;
219 total_gtt_size
+= vma
->node
.size
;
222 mutex_unlock(&dev
->struct_mutex
);
224 seq_printf(m
, "Total %d objects, %zu bytes, %zu GTT size\n",
225 count
, total_obj_size
, total_gtt_size
);
229 static int obj_rank_by_stolen(void *priv
,
230 struct list_head
*A
, struct list_head
*B
)
232 struct drm_i915_gem_object
*a
=
233 container_of(A
, struct drm_i915_gem_object
, obj_exec_link
);
234 struct drm_i915_gem_object
*b
=
235 container_of(B
, struct drm_i915_gem_object
, obj_exec_link
);
237 return a
->stolen
->start
- b
->stolen
->start
;
240 static int i915_gem_stolen_list_info(struct seq_file
*m
, void *data
)
242 struct drm_info_node
*node
= m
->private;
243 struct drm_device
*dev
= node
->minor
->dev
;
244 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
245 struct drm_i915_gem_object
*obj
;
246 size_t total_obj_size
, total_gtt_size
;
250 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
254 total_obj_size
= total_gtt_size
= count
= 0;
255 list_for_each_entry(obj
, &dev_priv
->mm
.bound_list
, global_list
) {
256 if (obj
->stolen
== NULL
)
259 list_add(&obj
->obj_exec_link
, &stolen
);
261 total_obj_size
+= obj
->base
.size
;
262 total_gtt_size
+= i915_gem_obj_ggtt_size(obj
);
265 list_for_each_entry(obj
, &dev_priv
->mm
.unbound_list
, global_list
) {
266 if (obj
->stolen
== NULL
)
269 list_add(&obj
->obj_exec_link
, &stolen
);
271 total_obj_size
+= obj
->base
.size
;
274 list_sort(NULL
, &stolen
, obj_rank_by_stolen
);
275 seq_puts(m
, "Stolen:\n");
276 while (!list_empty(&stolen
)) {
277 obj
= list_first_entry(&stolen
, typeof(*obj
), obj_exec_link
);
279 describe_obj(m
, obj
);
281 list_del_init(&obj
->obj_exec_link
);
283 mutex_unlock(&dev
->struct_mutex
);
285 seq_printf(m
, "Total %d objects, %zu bytes, %zu GTT size\n",
286 count
, total_obj_size
, total_gtt_size
);
290 #define count_objects(list, member) do { \
291 list_for_each_entry(obj, list, member) { \
292 size += i915_gem_obj_ggtt_size(obj); \
294 if (obj->map_and_fenceable) { \
295 mappable_size += i915_gem_obj_ggtt_size(obj); \
302 struct drm_i915_file_private
*file_priv
;
304 size_t total
, unbound
;
305 size_t global
, shared
;
306 size_t active
, inactive
;
309 static int per_file_stats(int id
, void *ptr
, void *data
)
311 struct drm_i915_gem_object
*obj
= ptr
;
312 struct file_stats
*stats
= data
;
313 struct i915_vma
*vma
;
316 stats
->total
+= obj
->base
.size
;
318 if (obj
->base
.name
|| obj
->base
.dma_buf
)
319 stats
->shared
+= obj
->base
.size
;
321 if (USES_FULL_PPGTT(obj
->base
.dev
)) {
322 list_for_each_entry(vma
, &obj
->vma_list
, vma_link
) {
323 struct i915_hw_ppgtt
*ppgtt
;
325 if (!drm_mm_node_allocated(&vma
->node
))
328 if (i915_is_ggtt(vma
->vm
)) {
329 stats
->global
+= obj
->base
.size
;
333 ppgtt
= container_of(vma
->vm
, struct i915_hw_ppgtt
, base
);
334 if (ppgtt
->ctx
&& ppgtt
->ctx
->file_priv
!= stats
->file_priv
)
337 if (obj
->ring
) /* XXX per-vma statistic */
338 stats
->active
+= obj
->base
.size
;
340 stats
->inactive
+= obj
->base
.size
;
345 if (i915_gem_obj_ggtt_bound(obj
)) {
346 stats
->global
+= obj
->base
.size
;
348 stats
->active
+= obj
->base
.size
;
350 stats
->inactive
+= obj
->base
.size
;
355 if (!list_empty(&obj
->global_list
))
356 stats
->unbound
+= obj
->base
.size
;
361 #define count_vmas(list, member) do { \
362 list_for_each_entry(vma, list, member) { \
363 size += i915_gem_obj_ggtt_size(vma->obj); \
365 if (vma->obj->map_and_fenceable) { \
366 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
372 static int i915_gem_object_info(struct seq_file
*m
, void* data
)
374 struct drm_info_node
*node
= m
->private;
375 struct drm_device
*dev
= node
->minor
->dev
;
376 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
377 u32 count
, mappable_count
, purgeable_count
;
378 size_t size
, mappable_size
, purgeable_size
;
379 struct drm_i915_gem_object
*obj
;
380 struct i915_address_space
*vm
= &dev_priv
->gtt
.base
;
381 struct drm_file
*file
;
382 struct i915_vma
*vma
;
385 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
389 seq_printf(m
, "%u objects, %zu bytes\n",
390 dev_priv
->mm
.object_count
,
391 dev_priv
->mm
.object_memory
);
393 size
= count
= mappable_size
= mappable_count
= 0;
394 count_objects(&dev_priv
->mm
.bound_list
, global_list
);
395 seq_printf(m
, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
396 count
, mappable_count
, size
, mappable_size
);
398 size
= count
= mappable_size
= mappable_count
= 0;
399 count_vmas(&vm
->active_list
, mm_list
);
400 seq_printf(m
, " %u [%u] active objects, %zu [%zu] bytes\n",
401 count
, mappable_count
, size
, mappable_size
);
403 size
= count
= mappable_size
= mappable_count
= 0;
404 count_vmas(&vm
->inactive_list
, mm_list
);
405 seq_printf(m
, " %u [%u] inactive objects, %zu [%zu] bytes\n",
406 count
, mappable_count
, size
, mappable_size
);
408 size
= count
= purgeable_size
= purgeable_count
= 0;
409 list_for_each_entry(obj
, &dev_priv
->mm
.unbound_list
, global_list
) {
410 size
+= obj
->base
.size
, ++count
;
411 if (obj
->madv
== I915_MADV_DONTNEED
)
412 purgeable_size
+= obj
->base
.size
, ++purgeable_count
;
414 seq_printf(m
, "%u unbound objects, %zu bytes\n", count
, size
);
416 size
= count
= mappable_size
= mappable_count
= 0;
417 list_for_each_entry(obj
, &dev_priv
->mm
.bound_list
, global_list
) {
418 if (obj
->fault_mappable
) {
419 size
+= i915_gem_obj_ggtt_size(obj
);
422 if (obj
->pin_mappable
) {
423 mappable_size
+= i915_gem_obj_ggtt_size(obj
);
426 if (obj
->madv
== I915_MADV_DONTNEED
) {
427 purgeable_size
+= obj
->base
.size
;
431 seq_printf(m
, "%u purgeable objects, %zu bytes\n",
432 purgeable_count
, purgeable_size
);
433 seq_printf(m
, "%u pinned mappable objects, %zu bytes\n",
434 mappable_count
, mappable_size
);
435 seq_printf(m
, "%u fault mappable objects, %zu bytes\n",
438 seq_printf(m
, "%zu [%lu] gtt total\n",
439 dev_priv
->gtt
.base
.total
,
440 dev_priv
->gtt
.mappable_end
- dev_priv
->gtt
.base
.start
);
443 list_for_each_entry_reverse(file
, &dev
->filelist
, lhead
) {
444 struct file_stats stats
;
445 struct task_struct
*task
;
447 memset(&stats
, 0, sizeof(stats
));
448 stats
.file_priv
= file
->driver_priv
;
449 idr_for_each(&file
->object_idr
, per_file_stats
, &stats
);
451 * Although we have a valid reference on file->pid, that does
452 * not guarantee that the task_struct who called get_pid() is
453 * still alive (e.g. get_pid(current) => fork() => exit()).
454 * Therefore, we need to protect this ->comm access using RCU.
457 task
= pid_task(file
->pid
, PIDTYPE_PID
);
458 seq_printf(m
, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
459 task
? task
->comm
: "<unknown>",
470 mutex_unlock(&dev
->struct_mutex
);
475 static int i915_gem_gtt_info(struct seq_file
*m
, void *data
)
477 struct drm_info_node
*node
= m
->private;
478 struct drm_device
*dev
= node
->minor
->dev
;
479 uintptr_t list
= (uintptr_t) node
->info_ent
->data
;
480 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
481 struct drm_i915_gem_object
*obj
;
482 size_t total_obj_size
, total_gtt_size
;
485 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
489 total_obj_size
= total_gtt_size
= count
= 0;
490 list_for_each_entry(obj
, &dev_priv
->mm
.bound_list
, global_list
) {
491 if (list
== PINNED_LIST
&& !i915_gem_obj_is_pinned(obj
))
495 describe_obj(m
, obj
);
497 total_obj_size
+= obj
->base
.size
;
498 total_gtt_size
+= i915_gem_obj_ggtt_size(obj
);
502 mutex_unlock(&dev
->struct_mutex
);
504 seq_printf(m
, "Total %d objects, %zu bytes, %zu GTT size\n",
505 count
, total_obj_size
, total_gtt_size
);
510 static int i915_gem_pageflip_info(struct seq_file
*m
, void *data
)
512 struct drm_info_node
*node
= m
->private;
513 struct drm_device
*dev
= node
->minor
->dev
;
515 struct intel_crtc
*crtc
;
518 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
522 for_each_intel_crtc(dev
, crtc
) {
523 const char pipe
= pipe_name(crtc
->pipe
);
524 const char plane
= plane_name(crtc
->plane
);
525 struct intel_unpin_work
*work
;
527 spin_lock_irqsave(&dev
->event_lock
, flags
);
528 work
= crtc
->unpin_work
;
530 seq_printf(m
, "No flip due on pipe %c (plane %c)\n",
533 if (atomic_read(&work
->pending
) < INTEL_FLIP_COMPLETE
) {
534 seq_printf(m
, "Flip queued on pipe %c (plane %c)\n",
537 seq_printf(m
, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
540 if (work
->enable_stall_check
)
541 seq_puts(m
, "Stall check enabled, ");
543 seq_puts(m
, "Stall check waiting for page flip ioctl, ");
544 seq_printf(m
, "%d prepares\n", atomic_read(&work
->pending
));
546 if (work
->old_fb_obj
) {
547 struct drm_i915_gem_object
*obj
= work
->old_fb_obj
;
549 seq_printf(m
, "Old framebuffer gtt_offset 0x%08lx\n",
550 i915_gem_obj_ggtt_offset(obj
));
552 if (work
->pending_flip_obj
) {
553 struct drm_i915_gem_object
*obj
= work
->pending_flip_obj
;
555 seq_printf(m
, "New framebuffer gtt_offset 0x%08lx\n",
556 i915_gem_obj_ggtt_offset(obj
));
559 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
562 mutex_unlock(&dev
->struct_mutex
);
567 static int i915_gem_request_info(struct seq_file
*m
, void *data
)
569 struct drm_info_node
*node
= m
->private;
570 struct drm_device
*dev
= node
->minor
->dev
;
571 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
572 struct intel_engine_cs
*ring
;
573 struct drm_i915_gem_request
*gem_request
;
576 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
581 for_each_ring(ring
, dev_priv
, i
) {
582 if (list_empty(&ring
->request_list
))
585 seq_printf(m
, "%s requests:\n", ring
->name
);
586 list_for_each_entry(gem_request
,
589 seq_printf(m
, " %d @ %d\n",
591 (int) (jiffies
- gem_request
->emitted_jiffies
));
595 mutex_unlock(&dev
->struct_mutex
);
598 seq_puts(m
, "No requests\n");
603 static void i915_ring_seqno_info(struct seq_file
*m
,
604 struct intel_engine_cs
*ring
)
606 if (ring
->get_seqno
) {
607 seq_printf(m
, "Current sequence (%s): %u\n",
608 ring
->name
, ring
->get_seqno(ring
, false));
612 static int i915_gem_seqno_info(struct seq_file
*m
, void *data
)
614 struct drm_info_node
*node
= m
->private;
615 struct drm_device
*dev
= node
->minor
->dev
;
616 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
617 struct intel_engine_cs
*ring
;
620 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
623 intel_runtime_pm_get(dev_priv
);
625 for_each_ring(ring
, dev_priv
, i
)
626 i915_ring_seqno_info(m
, ring
);
628 intel_runtime_pm_put(dev_priv
);
629 mutex_unlock(&dev
->struct_mutex
);
635 static int i915_interrupt_info(struct seq_file
*m
, void *data
)
637 struct drm_info_node
*node
= m
->private;
638 struct drm_device
*dev
= node
->minor
->dev
;
639 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
640 struct intel_engine_cs
*ring
;
643 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
646 intel_runtime_pm_get(dev_priv
);
648 if (IS_CHERRYVIEW(dev
)) {
650 seq_printf(m
, "Master Interrupt Control:\t%08x\n",
651 I915_READ(GEN8_MASTER_IRQ
));
653 seq_printf(m
, "Display IER:\t%08x\n",
655 seq_printf(m
, "Display IIR:\t%08x\n",
657 seq_printf(m
, "Display IIR_RW:\t%08x\n",
658 I915_READ(VLV_IIR_RW
));
659 seq_printf(m
, "Display IMR:\t%08x\n",
662 seq_printf(m
, "Pipe %c stat:\t%08x\n",
664 I915_READ(PIPESTAT(pipe
)));
666 seq_printf(m
, "Port hotplug:\t%08x\n",
667 I915_READ(PORT_HOTPLUG_EN
));
668 seq_printf(m
, "DPFLIPSTAT:\t%08x\n",
669 I915_READ(VLV_DPFLIPSTAT
));
670 seq_printf(m
, "DPINVGTT:\t%08x\n",
671 I915_READ(DPINVGTT
));
673 for (i
= 0; i
< 4; i
++) {
674 seq_printf(m
, "GT Interrupt IMR %d:\t%08x\n",
675 i
, I915_READ(GEN8_GT_IMR(i
)));
676 seq_printf(m
, "GT Interrupt IIR %d:\t%08x\n",
677 i
, I915_READ(GEN8_GT_IIR(i
)));
678 seq_printf(m
, "GT Interrupt IER %d:\t%08x\n",
679 i
, I915_READ(GEN8_GT_IER(i
)));
682 seq_printf(m
, "PCU interrupt mask:\t%08x\n",
683 I915_READ(GEN8_PCU_IMR
));
684 seq_printf(m
, "PCU interrupt identity:\t%08x\n",
685 I915_READ(GEN8_PCU_IIR
));
686 seq_printf(m
, "PCU interrupt enable:\t%08x\n",
687 I915_READ(GEN8_PCU_IER
));
688 } else if (INTEL_INFO(dev
)->gen
>= 8) {
689 seq_printf(m
, "Master Interrupt Control:\t%08x\n",
690 I915_READ(GEN8_MASTER_IRQ
));
692 for (i
= 0; i
< 4; i
++) {
693 seq_printf(m
, "GT Interrupt IMR %d:\t%08x\n",
694 i
, I915_READ(GEN8_GT_IMR(i
)));
695 seq_printf(m
, "GT Interrupt IIR %d:\t%08x\n",
696 i
, I915_READ(GEN8_GT_IIR(i
)));
697 seq_printf(m
, "GT Interrupt IER %d:\t%08x\n",
698 i
, I915_READ(GEN8_GT_IER(i
)));
701 for_each_pipe(pipe
) {
702 seq_printf(m
, "Pipe %c IMR:\t%08x\n",
704 I915_READ(GEN8_DE_PIPE_IMR(pipe
)));
705 seq_printf(m
, "Pipe %c IIR:\t%08x\n",
707 I915_READ(GEN8_DE_PIPE_IIR(pipe
)));
708 seq_printf(m
, "Pipe %c IER:\t%08x\n",
710 I915_READ(GEN8_DE_PIPE_IER(pipe
)));
713 seq_printf(m
, "Display Engine port interrupt mask:\t%08x\n",
714 I915_READ(GEN8_DE_PORT_IMR
));
715 seq_printf(m
, "Display Engine port interrupt identity:\t%08x\n",
716 I915_READ(GEN8_DE_PORT_IIR
));
717 seq_printf(m
, "Display Engine port interrupt enable:\t%08x\n",
718 I915_READ(GEN8_DE_PORT_IER
));
720 seq_printf(m
, "Display Engine misc interrupt mask:\t%08x\n",
721 I915_READ(GEN8_DE_MISC_IMR
));
722 seq_printf(m
, "Display Engine misc interrupt identity:\t%08x\n",
723 I915_READ(GEN8_DE_MISC_IIR
));
724 seq_printf(m
, "Display Engine misc interrupt enable:\t%08x\n",
725 I915_READ(GEN8_DE_MISC_IER
));
727 seq_printf(m
, "PCU interrupt mask:\t%08x\n",
728 I915_READ(GEN8_PCU_IMR
));
729 seq_printf(m
, "PCU interrupt identity:\t%08x\n",
730 I915_READ(GEN8_PCU_IIR
));
731 seq_printf(m
, "PCU interrupt enable:\t%08x\n",
732 I915_READ(GEN8_PCU_IER
));
733 } else if (IS_VALLEYVIEW(dev
)) {
734 seq_printf(m
, "Display IER:\t%08x\n",
736 seq_printf(m
, "Display IIR:\t%08x\n",
738 seq_printf(m
, "Display IIR_RW:\t%08x\n",
739 I915_READ(VLV_IIR_RW
));
740 seq_printf(m
, "Display IMR:\t%08x\n",
743 seq_printf(m
, "Pipe %c stat:\t%08x\n",
745 I915_READ(PIPESTAT(pipe
)));
747 seq_printf(m
, "Master IER:\t%08x\n",
748 I915_READ(VLV_MASTER_IER
));
750 seq_printf(m
, "Render IER:\t%08x\n",
752 seq_printf(m
, "Render IIR:\t%08x\n",
754 seq_printf(m
, "Render IMR:\t%08x\n",
757 seq_printf(m
, "PM IER:\t\t%08x\n",
758 I915_READ(GEN6_PMIER
));
759 seq_printf(m
, "PM IIR:\t\t%08x\n",
760 I915_READ(GEN6_PMIIR
));
761 seq_printf(m
, "PM IMR:\t\t%08x\n",
762 I915_READ(GEN6_PMIMR
));
764 seq_printf(m
, "Port hotplug:\t%08x\n",
765 I915_READ(PORT_HOTPLUG_EN
));
766 seq_printf(m
, "DPFLIPSTAT:\t%08x\n",
767 I915_READ(VLV_DPFLIPSTAT
));
768 seq_printf(m
, "DPINVGTT:\t%08x\n",
769 I915_READ(DPINVGTT
));
771 } else if (!HAS_PCH_SPLIT(dev
)) {
772 seq_printf(m
, "Interrupt enable: %08x\n",
774 seq_printf(m
, "Interrupt identity: %08x\n",
776 seq_printf(m
, "Interrupt mask: %08x\n",
779 seq_printf(m
, "Pipe %c stat: %08x\n",
781 I915_READ(PIPESTAT(pipe
)));
783 seq_printf(m
, "North Display Interrupt enable: %08x\n",
785 seq_printf(m
, "North Display Interrupt identity: %08x\n",
787 seq_printf(m
, "North Display Interrupt mask: %08x\n",
789 seq_printf(m
, "South Display Interrupt enable: %08x\n",
791 seq_printf(m
, "South Display Interrupt identity: %08x\n",
793 seq_printf(m
, "South Display Interrupt mask: %08x\n",
795 seq_printf(m
, "Graphics Interrupt enable: %08x\n",
797 seq_printf(m
, "Graphics Interrupt identity: %08x\n",
799 seq_printf(m
, "Graphics Interrupt mask: %08x\n",
802 for_each_ring(ring
, dev_priv
, i
) {
803 if (INTEL_INFO(dev
)->gen
>= 6) {
805 "Graphics Interrupt mask (%s): %08x\n",
806 ring
->name
, I915_READ_IMR(ring
));
808 i915_ring_seqno_info(m
, ring
);
810 intel_runtime_pm_put(dev_priv
);
811 mutex_unlock(&dev
->struct_mutex
);
816 static int i915_gem_fence_regs_info(struct seq_file
*m
, void *data
)
818 struct drm_info_node
*node
= m
->private;
819 struct drm_device
*dev
= node
->minor
->dev
;
820 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
823 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
827 seq_printf(m
, "Reserved fences = %d\n", dev_priv
->fence_reg_start
);
828 seq_printf(m
, "Total fences = %d\n", dev_priv
->num_fence_regs
);
829 for (i
= 0; i
< dev_priv
->num_fence_regs
; i
++) {
830 struct drm_i915_gem_object
*obj
= dev_priv
->fence_regs
[i
].obj
;
832 seq_printf(m
, "Fence %d, pin count = %d, object = ",
833 i
, dev_priv
->fence_regs
[i
].pin_count
);
835 seq_puts(m
, "unused");
837 describe_obj(m
, obj
);
841 mutex_unlock(&dev
->struct_mutex
);
845 static int i915_hws_info(struct seq_file
*m
, void *data
)
847 struct drm_info_node
*node
= m
->private;
848 struct drm_device
*dev
= node
->minor
->dev
;
849 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
850 struct intel_engine_cs
*ring
;
854 ring
= &dev_priv
->ring
[(uintptr_t)node
->info_ent
->data
];
855 hws
= ring
->status_page
.page_addr
;
859 for (i
= 0; i
< 4096 / sizeof(u32
) / 4; i
+= 4) {
860 seq_printf(m
, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
862 hws
[i
], hws
[i
+ 1], hws
[i
+ 2], hws
[i
+ 3]);
868 i915_error_state_write(struct file
*filp
,
869 const char __user
*ubuf
,
873 struct i915_error_state_file_priv
*error_priv
= filp
->private_data
;
874 struct drm_device
*dev
= error_priv
->dev
;
877 DRM_DEBUG_DRIVER("Resetting error state\n");
879 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
883 i915_destroy_error_state(dev
);
884 mutex_unlock(&dev
->struct_mutex
);
889 static int i915_error_state_open(struct inode
*inode
, struct file
*file
)
891 struct drm_device
*dev
= inode
->i_private
;
892 struct i915_error_state_file_priv
*error_priv
;
894 error_priv
= kzalloc(sizeof(*error_priv
), GFP_KERNEL
);
898 error_priv
->dev
= dev
;
900 i915_error_state_get(dev
, error_priv
);
902 file
->private_data
= error_priv
;
907 static int i915_error_state_release(struct inode
*inode
, struct file
*file
)
909 struct i915_error_state_file_priv
*error_priv
= file
->private_data
;
911 i915_error_state_put(error_priv
);
917 static ssize_t
i915_error_state_read(struct file
*file
, char __user
*userbuf
,
918 size_t count
, loff_t
*pos
)
920 struct i915_error_state_file_priv
*error_priv
= file
->private_data
;
921 struct drm_i915_error_state_buf error_str
;
923 ssize_t ret_count
= 0;
926 ret
= i915_error_state_buf_init(&error_str
, count
, *pos
);
930 ret
= i915_error_state_to_str(&error_str
, error_priv
);
934 ret_count
= simple_read_from_buffer(userbuf
, count
, &tmp_pos
,
941 *pos
= error_str
.start
+ ret_count
;
943 i915_error_state_buf_release(&error_str
);
944 return ret
?: ret_count
;
947 static const struct file_operations i915_error_state_fops
= {
948 .owner
= THIS_MODULE
,
949 .open
= i915_error_state_open
,
950 .read
= i915_error_state_read
,
951 .write
= i915_error_state_write
,
952 .llseek
= default_llseek
,
953 .release
= i915_error_state_release
,
957 i915_next_seqno_get(void *data
, u64
*val
)
959 struct drm_device
*dev
= data
;
960 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
963 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
967 *val
= dev_priv
->next_seqno
;
968 mutex_unlock(&dev
->struct_mutex
);
974 i915_next_seqno_set(void *data
, u64 val
)
976 struct drm_device
*dev
= data
;
979 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
983 ret
= i915_gem_set_seqno(dev
, val
);
984 mutex_unlock(&dev
->struct_mutex
);
989 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops
,
990 i915_next_seqno_get
, i915_next_seqno_set
,
993 static int i915_rstdby_delays(struct seq_file
*m
, void *unused
)
995 struct drm_info_node
*node
= m
->private;
996 struct drm_device
*dev
= node
->minor
->dev
;
997 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1001 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1004 intel_runtime_pm_get(dev_priv
);
1006 crstanddelay
= I915_READ16(CRSTANDVID
);
1008 intel_runtime_pm_put(dev_priv
);
1009 mutex_unlock(&dev
->struct_mutex
);
1011 seq_printf(m
, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay
>> 8) & 0x3f, (crstanddelay
& 0x3f));
1016 static int i915_frequency_info(struct seq_file
*m
, void *unused
)
1018 struct drm_info_node
*node
= m
->private;
1019 struct drm_device
*dev
= node
->minor
->dev
;
1020 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1023 intel_runtime_pm_get(dev_priv
);
1025 flush_delayed_work(&dev_priv
->rps
.delayed_resume_work
);
1028 u16 rgvswctl
= I915_READ16(MEMSWCTL
);
1029 u16 rgvstat
= I915_READ16(MEMSTAT_ILK
);
1031 seq_printf(m
, "Requested P-state: %d\n", (rgvswctl
>> 8) & 0xf);
1032 seq_printf(m
, "Requested VID: %d\n", rgvswctl
& 0x3f);
1033 seq_printf(m
, "Current VID: %d\n", (rgvstat
& MEMSTAT_VID_MASK
) >>
1035 seq_printf(m
, "Current P-state: %d\n",
1036 (rgvstat
& MEMSTAT_PSTATE_MASK
) >> MEMSTAT_PSTATE_SHIFT
);
1037 } else if (IS_GEN6(dev
) || (IS_GEN7(dev
) && !IS_VALLEYVIEW(dev
)) ||
1038 IS_BROADWELL(dev
)) {
1039 u32 gt_perf_status
= I915_READ(GEN6_GT_PERF_STATUS
);
1040 u32 rp_state_limits
= I915_READ(GEN6_RP_STATE_LIMITS
);
1041 u32 rp_state_cap
= I915_READ(GEN6_RP_STATE_CAP
);
1042 u32 rpmodectl
, rpinclimit
, rpdeclimit
;
1043 u32 rpstat
, cagf
, reqf
;
1044 u32 rpupei
, rpcurup
, rpprevup
;
1045 u32 rpdownei
, rpcurdown
, rpprevdown
;
1048 /* RPSTAT1 is in the GT power well */
1049 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1053 gen6_gt_force_wake_get(dev_priv
, FORCEWAKE_ALL
);
1055 reqf
= I915_READ(GEN6_RPNSWREQ
);
1056 reqf
&= ~GEN6_TURBO_DISABLE
;
1057 if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1061 reqf
*= GT_FREQUENCY_MULTIPLIER
;
1063 rpmodectl
= I915_READ(GEN6_RP_CONTROL
);
1064 rpinclimit
= I915_READ(GEN6_RP_UP_THRESHOLD
);
1065 rpdeclimit
= I915_READ(GEN6_RP_DOWN_THRESHOLD
);
1067 rpstat
= I915_READ(GEN6_RPSTAT1
);
1068 rpupei
= I915_READ(GEN6_RP_CUR_UP_EI
);
1069 rpcurup
= I915_READ(GEN6_RP_CUR_UP
);
1070 rpprevup
= I915_READ(GEN6_RP_PREV_UP
);
1071 rpdownei
= I915_READ(GEN6_RP_CUR_DOWN_EI
);
1072 rpcurdown
= I915_READ(GEN6_RP_CUR_DOWN
);
1073 rpprevdown
= I915_READ(GEN6_RP_PREV_DOWN
);
1074 if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1075 cagf
= (rpstat
& HSW_CAGF_MASK
) >> HSW_CAGF_SHIFT
;
1077 cagf
= (rpstat
& GEN6_CAGF_MASK
) >> GEN6_CAGF_SHIFT
;
1078 cagf
*= GT_FREQUENCY_MULTIPLIER
;
1080 gen6_gt_force_wake_put(dev_priv
, FORCEWAKE_ALL
);
1081 mutex_unlock(&dev
->struct_mutex
);
1083 seq_printf(m
, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1084 I915_READ(GEN6_PMIER
),
1085 I915_READ(GEN6_PMIMR
),
1086 I915_READ(GEN6_PMISR
),
1087 I915_READ(GEN6_PMIIR
),
1088 I915_READ(GEN6_PMINTRMSK
));
1089 seq_printf(m
, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status
);
1090 seq_printf(m
, "Render p-state ratio: %d\n",
1091 (gt_perf_status
& 0xff00) >> 8);
1092 seq_printf(m
, "Render p-state VID: %d\n",
1093 gt_perf_status
& 0xff);
1094 seq_printf(m
, "Render p-state limit: %d\n",
1095 rp_state_limits
& 0xff);
1096 seq_printf(m
, "RPSTAT1: 0x%08x\n", rpstat
);
1097 seq_printf(m
, "RPMODECTL: 0x%08x\n", rpmodectl
);
1098 seq_printf(m
, "RPINCLIMIT: 0x%08x\n", rpinclimit
);
1099 seq_printf(m
, "RPDECLIMIT: 0x%08x\n", rpdeclimit
);
1100 seq_printf(m
, "RPNSWREQ: %dMHz\n", reqf
);
1101 seq_printf(m
, "CAGF: %dMHz\n", cagf
);
1102 seq_printf(m
, "RP CUR UP EI: %dus\n", rpupei
&
1103 GEN6_CURICONT_MASK
);
1104 seq_printf(m
, "RP CUR UP: %dus\n", rpcurup
&
1105 GEN6_CURBSYTAVG_MASK
);
1106 seq_printf(m
, "RP PREV UP: %dus\n", rpprevup
&
1107 GEN6_CURBSYTAVG_MASK
);
1108 seq_printf(m
, "RP CUR DOWN EI: %dus\n", rpdownei
&
1110 seq_printf(m
, "RP CUR DOWN: %dus\n", rpcurdown
&
1111 GEN6_CURBSYTAVG_MASK
);
1112 seq_printf(m
, "RP PREV DOWN: %dus\n", rpprevdown
&
1113 GEN6_CURBSYTAVG_MASK
);
1115 max_freq
= (rp_state_cap
& 0xff0000) >> 16;
1116 seq_printf(m
, "Lowest (RPN) frequency: %dMHz\n",
1117 max_freq
* GT_FREQUENCY_MULTIPLIER
);
1119 max_freq
= (rp_state_cap
& 0xff00) >> 8;
1120 seq_printf(m
, "Nominal (RP1) frequency: %dMHz\n",
1121 max_freq
* GT_FREQUENCY_MULTIPLIER
);
1123 max_freq
= rp_state_cap
& 0xff;
1124 seq_printf(m
, "Max non-overclocked (RP0) frequency: %dMHz\n",
1125 max_freq
* GT_FREQUENCY_MULTIPLIER
);
1127 seq_printf(m
, "Max overclocked frequency: %dMHz\n",
1128 dev_priv
->rps
.max_freq
* GT_FREQUENCY_MULTIPLIER
);
1129 } else if (IS_VALLEYVIEW(dev
)) {
1132 mutex_lock(&dev_priv
->rps
.hw_lock
);
1133 freq_sts
= vlv_punit_read(dev_priv
, PUNIT_REG_GPU_FREQ_STS
);
1134 seq_printf(m
, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts
);
1135 seq_printf(m
, "DDR freq: %d MHz\n", dev_priv
->mem_freq
);
1137 val
= valleyview_rps_max_freq(dev_priv
);
1138 seq_printf(m
, "max GPU freq: %d MHz\n",
1139 vlv_gpu_freq(dev_priv
, val
));
1141 val
= valleyview_rps_min_freq(dev_priv
);
1142 seq_printf(m
, "min GPU freq: %d MHz\n",
1143 vlv_gpu_freq(dev_priv
, val
));
1145 seq_printf(m
, "current GPU freq: %d MHz\n",
1146 vlv_gpu_freq(dev_priv
, (freq_sts
>> 8) & 0xff));
1147 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1149 seq_puts(m
, "no P-state info available\n");
1153 intel_runtime_pm_put(dev_priv
);
1157 static int i915_delayfreq_table(struct seq_file
*m
, void *unused
)
1159 struct drm_info_node
*node
= m
->private;
1160 struct drm_device
*dev
= node
->minor
->dev
;
1161 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1165 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1168 intel_runtime_pm_get(dev_priv
);
1170 for (i
= 0; i
< 16; i
++) {
1171 delayfreq
= I915_READ(PXVFREQ_BASE
+ i
* 4);
1172 seq_printf(m
, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i
, delayfreq
,
1173 (delayfreq
& PXVFREQ_PX_MASK
) >> PXVFREQ_PX_SHIFT
);
1176 intel_runtime_pm_put(dev_priv
);
1178 mutex_unlock(&dev
->struct_mutex
);
1183 static inline int MAP_TO_MV(int map
)
1185 return 1250 - (map
* 25);
1188 static int i915_inttoext_table(struct seq_file
*m
, void *unused
)
1190 struct drm_info_node
*node
= m
->private;
1191 struct drm_device
*dev
= node
->minor
->dev
;
1192 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1196 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1199 intel_runtime_pm_get(dev_priv
);
1201 for (i
= 1; i
<= 32; i
++) {
1202 inttoext
= I915_READ(INTTOEXT_BASE_ILK
+ i
* 4);
1203 seq_printf(m
, "INTTOEXT%02d: 0x%08x\n", i
, inttoext
);
1206 intel_runtime_pm_put(dev_priv
);
1207 mutex_unlock(&dev
->struct_mutex
);
1212 static int ironlake_drpc_info(struct seq_file
*m
)
1214 struct drm_info_node
*node
= m
->private;
1215 struct drm_device
*dev
= node
->minor
->dev
;
1216 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1217 u32 rgvmodectl
, rstdbyctl
;
1221 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1224 intel_runtime_pm_get(dev_priv
);
1226 rgvmodectl
= I915_READ(MEMMODECTL
);
1227 rstdbyctl
= I915_READ(RSTDBYCTL
);
1228 crstandvid
= I915_READ16(CRSTANDVID
);
1230 intel_runtime_pm_put(dev_priv
);
1231 mutex_unlock(&dev
->struct_mutex
);
1233 seq_printf(m
, "HD boost: %s\n", (rgvmodectl
& MEMMODE_BOOST_EN
) ?
1235 seq_printf(m
, "Boost freq: %d\n",
1236 (rgvmodectl
& MEMMODE_BOOST_FREQ_MASK
) >>
1237 MEMMODE_BOOST_FREQ_SHIFT
);
1238 seq_printf(m
, "HW control enabled: %s\n",
1239 rgvmodectl
& MEMMODE_HWIDLE_EN
? "yes" : "no");
1240 seq_printf(m
, "SW control enabled: %s\n",
1241 rgvmodectl
& MEMMODE_SWMODE_EN
? "yes" : "no");
1242 seq_printf(m
, "Gated voltage change: %s\n",
1243 rgvmodectl
& MEMMODE_RCLK_GATE
? "yes" : "no");
1244 seq_printf(m
, "Starting frequency: P%d\n",
1245 (rgvmodectl
& MEMMODE_FSTART_MASK
) >> MEMMODE_FSTART_SHIFT
);
1246 seq_printf(m
, "Max P-state: P%d\n",
1247 (rgvmodectl
& MEMMODE_FMAX_MASK
) >> MEMMODE_FMAX_SHIFT
);
1248 seq_printf(m
, "Min P-state: P%d\n", (rgvmodectl
& MEMMODE_FMIN_MASK
));
1249 seq_printf(m
, "RS1 VID: %d\n", (crstandvid
& 0x3f));
1250 seq_printf(m
, "RS2 VID: %d\n", ((crstandvid
>> 8) & 0x3f));
1251 seq_printf(m
, "Render standby enabled: %s\n",
1252 (rstdbyctl
& RCX_SW_EXIT
) ? "no" : "yes");
1253 seq_puts(m
, "Current RS state: ");
1254 switch (rstdbyctl
& RSX_STATUS_MASK
) {
1256 seq_puts(m
, "on\n");
1258 case RSX_STATUS_RC1
:
1259 seq_puts(m
, "RC1\n");
1261 case RSX_STATUS_RC1E
:
1262 seq_puts(m
, "RC1E\n");
1264 case RSX_STATUS_RS1
:
1265 seq_puts(m
, "RS1\n");
1267 case RSX_STATUS_RS2
:
1268 seq_puts(m
, "RS2 (RC6)\n");
1270 case RSX_STATUS_RS3
:
1271 seq_puts(m
, "RC3 (RC6+)\n");
1274 seq_puts(m
, "unknown\n");
1281 static int vlv_drpc_info(struct seq_file
*m
)
1284 struct drm_info_node
*node
= m
->private;
1285 struct drm_device
*dev
= node
->minor
->dev
;
1286 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1287 u32 rpmodectl1
, rcctl1
;
1288 unsigned fw_rendercount
= 0, fw_mediacount
= 0;
1290 intel_runtime_pm_get(dev_priv
);
1292 rpmodectl1
= I915_READ(GEN6_RP_CONTROL
);
1293 rcctl1
= I915_READ(GEN6_RC_CONTROL
);
1295 intel_runtime_pm_put(dev_priv
);
1297 seq_printf(m
, "Video Turbo Mode: %s\n",
1298 yesno(rpmodectl1
& GEN6_RP_MEDIA_TURBO
));
1299 seq_printf(m
, "Turbo enabled: %s\n",
1300 yesno(rpmodectl1
& GEN6_RP_ENABLE
));
1301 seq_printf(m
, "HW control enabled: %s\n",
1302 yesno(rpmodectl1
& GEN6_RP_ENABLE
));
1303 seq_printf(m
, "SW control enabled: %s\n",
1304 yesno((rpmodectl1
& GEN6_RP_MEDIA_MODE_MASK
) ==
1305 GEN6_RP_MEDIA_SW_MODE
));
1306 seq_printf(m
, "RC6 Enabled: %s\n",
1307 yesno(rcctl1
& (GEN7_RC_CTL_TO_MODE
|
1308 GEN6_RC_CTL_EI_MODE(1))));
1309 seq_printf(m
, "Render Power Well: %s\n",
1310 (I915_READ(VLV_GTLC_PW_STATUS
) &
1311 VLV_GTLC_PW_RENDER_STATUS_MASK
) ? "Up" : "Down");
1312 seq_printf(m
, "Media Power Well: %s\n",
1313 (I915_READ(VLV_GTLC_PW_STATUS
) &
1314 VLV_GTLC_PW_MEDIA_STATUS_MASK
) ? "Up" : "Down");
1316 seq_printf(m
, "Render RC6 residency since boot: %u\n",
1317 I915_READ(VLV_GT_RENDER_RC6
));
1318 seq_printf(m
, "Media RC6 residency since boot: %u\n",
1319 I915_READ(VLV_GT_MEDIA_RC6
));
1321 spin_lock_irq(&dev_priv
->uncore
.lock
);
1322 fw_rendercount
= dev_priv
->uncore
.fw_rendercount
;
1323 fw_mediacount
= dev_priv
->uncore
.fw_mediacount
;
1324 spin_unlock_irq(&dev_priv
->uncore
.lock
);
1326 seq_printf(m
, "Forcewake Render Count = %u\n", fw_rendercount
);
1327 seq_printf(m
, "Forcewake Media Count = %u\n", fw_mediacount
);
1334 static int gen6_drpc_info(struct seq_file
*m
)
1337 struct drm_info_node
*node
= m
->private;
1338 struct drm_device
*dev
= node
->minor
->dev
;
1339 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1340 u32 rpmodectl1
, gt_core_status
, rcctl1
, rc6vids
= 0;
1341 unsigned forcewake_count
;
1344 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1347 intel_runtime_pm_get(dev_priv
);
1349 spin_lock_irq(&dev_priv
->uncore
.lock
);
1350 forcewake_count
= dev_priv
->uncore
.forcewake_count
;
1351 spin_unlock_irq(&dev_priv
->uncore
.lock
);
1353 if (forcewake_count
) {
1354 seq_puts(m
, "RC information inaccurate because somebody "
1355 "holds a forcewake reference \n");
1357 /* NB: we cannot use forcewake, else we read the wrong values */
1358 while (count
++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK
) & 1))
1360 seq_printf(m
, "RC information accurate: %s\n", yesno(count
< 51));
1363 gt_core_status
= readl(dev_priv
->regs
+ GEN6_GT_CORE_STATUS
);
1364 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS
, gt_core_status
, 4, true);
1366 rpmodectl1
= I915_READ(GEN6_RP_CONTROL
);
1367 rcctl1
= I915_READ(GEN6_RC_CONTROL
);
1368 mutex_unlock(&dev
->struct_mutex
);
1369 mutex_lock(&dev_priv
->rps
.hw_lock
);
1370 sandybridge_pcode_read(dev_priv
, GEN6_PCODE_READ_RC6VIDS
, &rc6vids
);
1371 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1373 intel_runtime_pm_put(dev_priv
);
1375 seq_printf(m
, "Video Turbo Mode: %s\n",
1376 yesno(rpmodectl1
& GEN6_RP_MEDIA_TURBO
));
1377 seq_printf(m
, "HW control enabled: %s\n",
1378 yesno(rpmodectl1
& GEN6_RP_ENABLE
));
1379 seq_printf(m
, "SW control enabled: %s\n",
1380 yesno((rpmodectl1
& GEN6_RP_MEDIA_MODE_MASK
) ==
1381 GEN6_RP_MEDIA_SW_MODE
));
1382 seq_printf(m
, "RC1e Enabled: %s\n",
1383 yesno(rcctl1
& GEN6_RC_CTL_RC1e_ENABLE
));
1384 seq_printf(m
, "RC6 Enabled: %s\n",
1385 yesno(rcctl1
& GEN6_RC_CTL_RC6_ENABLE
));
1386 seq_printf(m
, "Deep RC6 Enabled: %s\n",
1387 yesno(rcctl1
& GEN6_RC_CTL_RC6p_ENABLE
));
1388 seq_printf(m
, "Deepest RC6 Enabled: %s\n",
1389 yesno(rcctl1
& GEN6_RC_CTL_RC6pp_ENABLE
));
1390 seq_puts(m
, "Current RC state: ");
1391 switch (gt_core_status
& GEN6_RCn_MASK
) {
1393 if (gt_core_status
& GEN6_CORE_CPD_STATE_MASK
)
1394 seq_puts(m
, "Core Power Down\n");
1396 seq_puts(m
, "on\n");
1399 seq_puts(m
, "RC3\n");
1402 seq_puts(m
, "RC6\n");
1405 seq_puts(m
, "RC7\n");
1408 seq_puts(m
, "Unknown\n");
1412 seq_printf(m
, "Core Power Down: %s\n",
1413 yesno(gt_core_status
& GEN6_CORE_CPD_STATE_MASK
));
1415 /* Not exactly sure what this is */
1416 seq_printf(m
, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1417 I915_READ(GEN6_GT_GFX_RC6_LOCKED
));
1418 seq_printf(m
, "RC6 residency since boot: %u\n",
1419 I915_READ(GEN6_GT_GFX_RC6
));
1420 seq_printf(m
, "RC6+ residency since boot: %u\n",
1421 I915_READ(GEN6_GT_GFX_RC6p
));
1422 seq_printf(m
, "RC6++ residency since boot: %u\n",
1423 I915_READ(GEN6_GT_GFX_RC6pp
));
1425 seq_printf(m
, "RC6 voltage: %dmV\n",
1426 GEN6_DECODE_RC6_VID(((rc6vids
>> 0) & 0xff)));
1427 seq_printf(m
, "RC6+ voltage: %dmV\n",
1428 GEN6_DECODE_RC6_VID(((rc6vids
>> 8) & 0xff)));
1429 seq_printf(m
, "RC6++ voltage: %dmV\n",
1430 GEN6_DECODE_RC6_VID(((rc6vids
>> 16) & 0xff)));
1434 static int i915_drpc_info(struct seq_file
*m
, void *unused
)
1436 struct drm_info_node
*node
= m
->private;
1437 struct drm_device
*dev
= node
->minor
->dev
;
1439 if (IS_VALLEYVIEW(dev
))
1440 return vlv_drpc_info(m
);
1441 else if (IS_GEN6(dev
) || IS_GEN7(dev
))
1442 return gen6_drpc_info(m
);
1444 return ironlake_drpc_info(m
);
1447 static int i915_fbc_status(struct seq_file
*m
, void *unused
)
1449 struct drm_info_node
*node
= m
->private;
1450 struct drm_device
*dev
= node
->minor
->dev
;
1451 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1453 if (!HAS_FBC(dev
)) {
1454 seq_puts(m
, "FBC unsupported on this chipset\n");
1458 intel_runtime_pm_get(dev_priv
);
1460 if (intel_fbc_enabled(dev
)) {
1461 seq_puts(m
, "FBC enabled\n");
1463 seq_puts(m
, "FBC disabled: ");
1464 switch (dev_priv
->fbc
.no_fbc_reason
) {
1466 seq_puts(m
, "FBC actived, but currently disabled in hardware");
1468 case FBC_UNSUPPORTED
:
1469 seq_puts(m
, "unsupported by this chipset");
1472 seq_puts(m
, "no outputs");
1474 case FBC_STOLEN_TOO_SMALL
:
1475 seq_puts(m
, "not enough stolen memory");
1477 case FBC_UNSUPPORTED_MODE
:
1478 seq_puts(m
, "mode not supported");
1480 case FBC_MODE_TOO_LARGE
:
1481 seq_puts(m
, "mode too large");
1484 seq_puts(m
, "FBC unsupported on plane");
1487 seq_puts(m
, "scanout buffer not tiled");
1489 case FBC_MULTIPLE_PIPES
:
1490 seq_puts(m
, "multiple pipes are enabled");
1492 case FBC_MODULE_PARAM
:
1493 seq_puts(m
, "disabled per module param (default off)");
1495 case FBC_CHIP_DEFAULT
:
1496 seq_puts(m
, "disabled per chip default");
1499 seq_puts(m
, "unknown reason");
1504 intel_runtime_pm_put(dev_priv
);
1509 static int i915_ips_status(struct seq_file
*m
, void *unused
)
1511 struct drm_info_node
*node
= m
->private;
1512 struct drm_device
*dev
= node
->minor
->dev
;
1513 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1515 if (!HAS_IPS(dev
)) {
1516 seq_puts(m
, "not supported\n");
1520 intel_runtime_pm_get(dev_priv
);
1522 if (IS_BROADWELL(dev
) || I915_READ(IPS_CTL
) & IPS_ENABLE
)
1523 seq_puts(m
, "enabled\n");
1525 seq_puts(m
, "disabled\n");
1527 intel_runtime_pm_put(dev_priv
);
1532 static int i915_sr_status(struct seq_file
*m
, void *unused
)
1534 struct drm_info_node
*node
= m
->private;
1535 struct drm_device
*dev
= node
->minor
->dev
;
1536 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1537 bool sr_enabled
= false;
1539 intel_runtime_pm_get(dev_priv
);
1541 if (HAS_PCH_SPLIT(dev
))
1542 sr_enabled
= I915_READ(WM1_LP_ILK
) & WM1_LP_SR_EN
;
1543 else if (IS_CRESTLINE(dev
) || IS_I945G(dev
) || IS_I945GM(dev
))
1544 sr_enabled
= I915_READ(FW_BLC_SELF
) & FW_BLC_SELF_EN
;
1545 else if (IS_I915GM(dev
))
1546 sr_enabled
= I915_READ(INSTPM
) & INSTPM_SELF_EN
;
1547 else if (IS_PINEVIEW(dev
))
1548 sr_enabled
= I915_READ(DSPFW3
) & PINEVIEW_SELF_REFRESH_EN
;
1550 intel_runtime_pm_put(dev_priv
);
1552 seq_printf(m
, "self-refresh: %s\n",
1553 sr_enabled
? "enabled" : "disabled");
1558 static int i915_emon_status(struct seq_file
*m
, void *unused
)
1560 struct drm_info_node
*node
= m
->private;
1561 struct drm_device
*dev
= node
->minor
->dev
;
1562 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1563 unsigned long temp
, chipset
, gfx
;
1569 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1573 temp
= i915_mch_val(dev_priv
);
1574 chipset
= i915_chipset_val(dev_priv
);
1575 gfx
= i915_gfx_val(dev_priv
);
1576 mutex_unlock(&dev
->struct_mutex
);
1578 seq_printf(m
, "GMCH temp: %ld\n", temp
);
1579 seq_printf(m
, "Chipset power: %ld\n", chipset
);
1580 seq_printf(m
, "GFX power: %ld\n", gfx
);
1581 seq_printf(m
, "Total power: %ld\n", chipset
+ gfx
);
1586 static int i915_ring_freq_table(struct seq_file
*m
, void *unused
)
1588 struct drm_info_node
*node
= m
->private;
1589 struct drm_device
*dev
= node
->minor
->dev
;
1590 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1592 int gpu_freq
, ia_freq
;
1594 if (!(IS_GEN6(dev
) || IS_GEN7(dev
))) {
1595 seq_puts(m
, "unsupported on this chipset\n");
1599 intel_runtime_pm_get(dev_priv
);
1601 flush_delayed_work(&dev_priv
->rps
.delayed_resume_work
);
1603 ret
= mutex_lock_interruptible(&dev_priv
->rps
.hw_lock
);
1607 seq_puts(m
, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1609 for (gpu_freq
= dev_priv
->rps
.min_freq_softlimit
;
1610 gpu_freq
<= dev_priv
->rps
.max_freq_softlimit
;
1613 sandybridge_pcode_read(dev_priv
,
1614 GEN6_PCODE_READ_MIN_FREQ_TABLE
,
1616 seq_printf(m
, "%d\t\t%d\t\t\t\t%d\n",
1617 gpu_freq
* GT_FREQUENCY_MULTIPLIER
,
1618 ((ia_freq
>> 0) & 0xff) * 100,
1619 ((ia_freq
>> 8) & 0xff) * 100);
1622 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1625 intel_runtime_pm_put(dev_priv
);
1629 static int i915_gfxec(struct seq_file
*m
, void *unused
)
1631 struct drm_info_node
*node
= m
->private;
1632 struct drm_device
*dev
= node
->minor
->dev
;
1633 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1636 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1639 intel_runtime_pm_get(dev_priv
);
1641 seq_printf(m
, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1642 intel_runtime_pm_put(dev_priv
);
1644 mutex_unlock(&dev
->struct_mutex
);
1649 static int i915_opregion(struct seq_file
*m
, void *unused
)
1651 struct drm_info_node
*node
= m
->private;
1652 struct drm_device
*dev
= node
->minor
->dev
;
1653 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1654 struct intel_opregion
*opregion
= &dev_priv
->opregion
;
1655 void *data
= kmalloc(OPREGION_SIZE
, GFP_KERNEL
);
1661 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1665 if (opregion
->header
) {
1666 memcpy_fromio(data
, opregion
->header
, OPREGION_SIZE
);
1667 seq_write(m
, data
, OPREGION_SIZE
);
1670 mutex_unlock(&dev
->struct_mutex
);
1677 static int i915_gem_framebuffer_info(struct seq_file
*m
, void *data
)
1679 struct drm_info_node
*node
= m
->private;
1680 struct drm_device
*dev
= node
->minor
->dev
;
1681 struct intel_fbdev
*ifbdev
= NULL
;
1682 struct intel_framebuffer
*fb
;
1684 #ifdef CONFIG_DRM_I915_FBDEV
1685 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1687 ifbdev
= dev_priv
->fbdev
;
1688 fb
= to_intel_framebuffer(ifbdev
->helper
.fb
);
1690 seq_printf(m
, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1694 fb
->base
.bits_per_pixel
,
1695 atomic_read(&fb
->base
.refcount
.refcount
));
1696 describe_obj(m
, fb
->obj
);
1700 mutex_lock(&dev
->mode_config
.fb_lock
);
1701 list_for_each_entry(fb
, &dev
->mode_config
.fb_list
, base
.head
) {
1702 if (ifbdev
&& &fb
->base
== ifbdev
->helper
.fb
)
1705 seq_printf(m
, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1709 fb
->base
.bits_per_pixel
,
1710 atomic_read(&fb
->base
.refcount
.refcount
));
1711 describe_obj(m
, fb
->obj
);
1714 mutex_unlock(&dev
->mode_config
.fb_lock
);
1719 static int i915_context_status(struct seq_file
*m
, void *unused
)
1721 struct drm_info_node
*node
= m
->private;
1722 struct drm_device
*dev
= node
->minor
->dev
;
1723 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1724 struct intel_engine_cs
*ring
;
1725 struct intel_context
*ctx
;
1728 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1732 if (dev_priv
->ips
.pwrctx
) {
1733 seq_puts(m
, "power context ");
1734 describe_obj(m
, dev_priv
->ips
.pwrctx
);
1738 if (dev_priv
->ips
.renderctx
) {
1739 seq_puts(m
, "render context ");
1740 describe_obj(m
, dev_priv
->ips
.renderctx
);
1744 list_for_each_entry(ctx
, &dev_priv
->context_list
, link
) {
1745 if (ctx
->obj
== NULL
)
1748 seq_puts(m
, "HW context ");
1749 describe_ctx(m
, ctx
);
1750 for_each_ring(ring
, dev_priv
, i
)
1751 if (ring
->default_context
== ctx
)
1752 seq_printf(m
, "(default context %s) ", ring
->name
);
1754 describe_obj(m
, ctx
->obj
);
1758 mutex_unlock(&dev
->struct_mutex
);
1763 static int i915_gen6_forcewake_count_info(struct seq_file
*m
, void *data
)
1765 struct drm_info_node
*node
= m
->private;
1766 struct drm_device
*dev
= node
->minor
->dev
;
1767 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1768 unsigned forcewake_count
= 0, fw_rendercount
= 0, fw_mediacount
= 0;
1770 spin_lock_irq(&dev_priv
->uncore
.lock
);
1771 if (IS_VALLEYVIEW(dev
)) {
1772 fw_rendercount
= dev_priv
->uncore
.fw_rendercount
;
1773 fw_mediacount
= dev_priv
->uncore
.fw_mediacount
;
1775 forcewake_count
= dev_priv
->uncore
.forcewake_count
;
1776 spin_unlock_irq(&dev_priv
->uncore
.lock
);
1778 if (IS_VALLEYVIEW(dev
)) {
1779 seq_printf(m
, "fw_rendercount = %u\n", fw_rendercount
);
1780 seq_printf(m
, "fw_mediacount = %u\n", fw_mediacount
);
1782 seq_printf(m
, "forcewake count = %u\n", forcewake_count
);
1787 static const char *swizzle_string(unsigned swizzle
)
1790 case I915_BIT_6_SWIZZLE_NONE
:
1792 case I915_BIT_6_SWIZZLE_9
:
1794 case I915_BIT_6_SWIZZLE_9_10
:
1795 return "bit9/bit10";
1796 case I915_BIT_6_SWIZZLE_9_11
:
1797 return "bit9/bit11";
1798 case I915_BIT_6_SWIZZLE_9_10_11
:
1799 return "bit9/bit10/bit11";
1800 case I915_BIT_6_SWIZZLE_9_17
:
1801 return "bit9/bit17";
1802 case I915_BIT_6_SWIZZLE_9_10_17
:
1803 return "bit9/bit10/bit17";
1804 case I915_BIT_6_SWIZZLE_UNKNOWN
:
1811 static int i915_swizzle_info(struct seq_file
*m
, void *data
)
1813 struct drm_info_node
*node
= m
->private;
1814 struct drm_device
*dev
= node
->minor
->dev
;
1815 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1818 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1821 intel_runtime_pm_get(dev_priv
);
1823 seq_printf(m
, "bit6 swizzle for X-tiling = %s\n",
1824 swizzle_string(dev_priv
->mm
.bit_6_swizzle_x
));
1825 seq_printf(m
, "bit6 swizzle for Y-tiling = %s\n",
1826 swizzle_string(dev_priv
->mm
.bit_6_swizzle_y
));
1828 if (IS_GEN3(dev
) || IS_GEN4(dev
)) {
1829 seq_printf(m
, "DDC = 0x%08x\n",
1831 seq_printf(m
, "C0DRB3 = 0x%04x\n",
1832 I915_READ16(C0DRB3
));
1833 seq_printf(m
, "C1DRB3 = 0x%04x\n",
1834 I915_READ16(C1DRB3
));
1835 } else if (INTEL_INFO(dev
)->gen
>= 6) {
1836 seq_printf(m
, "MAD_DIMM_C0 = 0x%08x\n",
1837 I915_READ(MAD_DIMM_C0
));
1838 seq_printf(m
, "MAD_DIMM_C1 = 0x%08x\n",
1839 I915_READ(MAD_DIMM_C1
));
1840 seq_printf(m
, "MAD_DIMM_C2 = 0x%08x\n",
1841 I915_READ(MAD_DIMM_C2
));
1842 seq_printf(m
, "TILECTL = 0x%08x\n",
1843 I915_READ(TILECTL
));
1845 seq_printf(m
, "GAMTARBMODE = 0x%08x\n",
1846 I915_READ(GAMTARBMODE
));
1848 seq_printf(m
, "ARB_MODE = 0x%08x\n",
1849 I915_READ(ARB_MODE
));
1850 seq_printf(m
, "DISP_ARB_CTL = 0x%08x\n",
1851 I915_READ(DISP_ARB_CTL
));
1853 intel_runtime_pm_put(dev_priv
);
1854 mutex_unlock(&dev
->struct_mutex
);
1859 static int per_file_ctx(int id
, void *ptr
, void *data
)
1861 struct intel_context
*ctx
= ptr
;
1862 struct seq_file
*m
= data
;
1863 struct i915_hw_ppgtt
*ppgtt
= ctx_to_ppgtt(ctx
);
1865 if (i915_gem_context_is_default(ctx
))
1866 seq_puts(m
, " default context:\n");
1868 seq_printf(m
, " context %d:\n", ctx
->id
);
1869 ppgtt
->debug_dump(ppgtt
, m
);
1874 static void gen8_ppgtt_info(struct seq_file
*m
, struct drm_device
*dev
)
1876 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1877 struct intel_engine_cs
*ring
;
1878 struct i915_hw_ppgtt
*ppgtt
= dev_priv
->mm
.aliasing_ppgtt
;
1884 seq_printf(m
, "Page directories: %d\n", ppgtt
->num_pd_pages
);
1885 seq_printf(m
, "Page tables: %d\n", ppgtt
->num_pd_entries
);
1886 for_each_ring(ring
, dev_priv
, unused
) {
1887 seq_printf(m
, "%s\n", ring
->name
);
1888 for (i
= 0; i
< 4; i
++) {
1889 u32 offset
= 0x270 + i
* 8;
1890 u64 pdp
= I915_READ(ring
->mmio_base
+ offset
+ 4);
1892 pdp
|= I915_READ(ring
->mmio_base
+ offset
);
1893 seq_printf(m
, "\tPDP%d 0x%016llx\n", i
, pdp
);
1898 static void gen6_ppgtt_info(struct seq_file
*m
, struct drm_device
*dev
)
1900 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1901 struct intel_engine_cs
*ring
;
1902 struct drm_file
*file
;
1905 if (INTEL_INFO(dev
)->gen
== 6)
1906 seq_printf(m
, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE
));
1908 for_each_ring(ring
, dev_priv
, i
) {
1909 seq_printf(m
, "%s\n", ring
->name
);
1910 if (INTEL_INFO(dev
)->gen
== 7)
1911 seq_printf(m
, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring
)));
1912 seq_printf(m
, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring
)));
1913 seq_printf(m
, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring
)));
1914 seq_printf(m
, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring
)));
1916 if (dev_priv
->mm
.aliasing_ppgtt
) {
1917 struct i915_hw_ppgtt
*ppgtt
= dev_priv
->mm
.aliasing_ppgtt
;
1919 seq_puts(m
, "aliasing PPGTT:\n");
1920 seq_printf(m
, "pd gtt offset: 0x%08x\n", ppgtt
->pd_offset
);
1922 ppgtt
->debug_dump(ppgtt
, m
);
1926 list_for_each_entry_reverse(file
, &dev
->filelist
, lhead
) {
1927 struct drm_i915_file_private
*file_priv
= file
->driver_priv
;
1929 seq_printf(m
, "proc: %s\n",
1930 get_pid_task(file
->pid
, PIDTYPE_PID
)->comm
);
1931 idr_for_each(&file_priv
->context_idr
, per_file_ctx
, m
);
1933 seq_printf(m
, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK
));
1936 static int i915_ppgtt_info(struct seq_file
*m
, void *data
)
1938 struct drm_info_node
*node
= m
->private;
1939 struct drm_device
*dev
= node
->minor
->dev
;
1940 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1942 int ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1945 intel_runtime_pm_get(dev_priv
);
1947 if (INTEL_INFO(dev
)->gen
>= 8)
1948 gen8_ppgtt_info(m
, dev
);
1949 else if (INTEL_INFO(dev
)->gen
>= 6)
1950 gen6_ppgtt_info(m
, dev
);
1952 intel_runtime_pm_put(dev_priv
);
1953 mutex_unlock(&dev
->struct_mutex
);
1958 static int i915_llc(struct seq_file
*m
, void *data
)
1960 struct drm_info_node
*node
= m
->private;
1961 struct drm_device
*dev
= node
->minor
->dev
;
1962 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1964 /* Size calculation for LLC is a bit of a pain. Ignore for now. */
1965 seq_printf(m
, "LLC: %s\n", yesno(HAS_LLC(dev
)));
1966 seq_printf(m
, "eLLC: %zuMB\n", dev_priv
->ellc_size
);
1971 static int i915_edp_psr_status(struct seq_file
*m
, void *data
)
1973 struct drm_info_node
*node
= m
->private;
1974 struct drm_device
*dev
= node
->minor
->dev
;
1975 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1977 bool enabled
= false;
1979 intel_runtime_pm_get(dev_priv
);
1981 seq_printf(m
, "Sink_Support: %s\n", yesno(dev_priv
->psr
.sink_support
));
1982 seq_printf(m
, "Source_OK: %s\n", yesno(dev_priv
->psr
.source_ok
));
1983 seq_printf(m
, "Enabled: %s\n", yesno(dev_priv
->psr
.enabled
));
1984 seq_printf(m
, "Active: %s\n", yesno(dev_priv
->psr
.active
));
1986 enabled
= HAS_PSR(dev
) &&
1987 I915_READ(EDP_PSR_CTL(dev
)) & EDP_PSR_ENABLE
;
1988 seq_printf(m
, "HW Enabled & Active bit: %s\n", yesno(enabled
));
1991 psrperf
= I915_READ(EDP_PSR_PERF_CNT(dev
)) &
1992 EDP_PSR_PERF_CNT_MASK
;
1993 seq_printf(m
, "Performance_Counter: %u\n", psrperf
);
1995 intel_runtime_pm_put(dev_priv
);
1999 static int i915_sink_crc(struct seq_file
*m
, void *data
)
2001 struct drm_info_node
*node
= m
->private;
2002 struct drm_device
*dev
= node
->minor
->dev
;
2003 struct intel_encoder
*encoder
;
2004 struct intel_connector
*connector
;
2005 struct intel_dp
*intel_dp
= NULL
;
2009 drm_modeset_lock_all(dev
);
2010 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
2013 if (connector
->base
.dpms
!= DRM_MODE_DPMS_ON
)
2016 if (!connector
->base
.encoder
)
2019 encoder
= to_intel_encoder(connector
->base
.encoder
);
2020 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
2023 intel_dp
= enc_to_intel_dp(&encoder
->base
);
2025 ret
= intel_dp_sink_crc(intel_dp
, crc
);
2029 seq_printf(m
, "%02x%02x%02x%02x%02x%02x\n",
2030 crc
[0], crc
[1], crc
[2],
2031 crc
[3], crc
[4], crc
[5]);
2036 drm_modeset_unlock_all(dev
);
2040 static int i915_energy_uJ(struct seq_file
*m
, void *data
)
2042 struct drm_info_node
*node
= m
->private;
2043 struct drm_device
*dev
= node
->minor
->dev
;
2044 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2048 if (INTEL_INFO(dev
)->gen
< 6)
2051 intel_runtime_pm_get(dev_priv
);
2053 rdmsrl(MSR_RAPL_POWER_UNIT
, power
);
2054 power
= (power
& 0x1f00) >> 8;
2055 units
= 1000000 / (1 << power
); /* convert to uJ */
2056 power
= I915_READ(MCH_SECP_NRG_STTS
);
2059 intel_runtime_pm_put(dev_priv
);
2061 seq_printf(m
, "%llu", (long long unsigned)power
);
2066 static int i915_pc8_status(struct seq_file
*m
, void *unused
)
2068 struct drm_info_node
*node
= m
->private;
2069 struct drm_device
*dev
= node
->minor
->dev
;
2070 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2072 if (!IS_HASWELL(dev
) && !IS_BROADWELL(dev
)) {
2073 seq_puts(m
, "not supported\n");
2077 seq_printf(m
, "GPU idle: %s\n", yesno(!dev_priv
->mm
.busy
));
2078 seq_printf(m
, "IRQs disabled: %s\n",
2079 yesno(dev_priv
->pm
.irqs_disabled
));
2084 static const char *power_domain_str(enum intel_display_power_domain domain
)
2087 case POWER_DOMAIN_PIPE_A
:
2089 case POWER_DOMAIN_PIPE_B
:
2091 case POWER_DOMAIN_PIPE_C
:
2093 case POWER_DOMAIN_PIPE_A_PANEL_FITTER
:
2094 return "PIPE_A_PANEL_FITTER";
2095 case POWER_DOMAIN_PIPE_B_PANEL_FITTER
:
2096 return "PIPE_B_PANEL_FITTER";
2097 case POWER_DOMAIN_PIPE_C_PANEL_FITTER
:
2098 return "PIPE_C_PANEL_FITTER";
2099 case POWER_DOMAIN_TRANSCODER_A
:
2100 return "TRANSCODER_A";
2101 case POWER_DOMAIN_TRANSCODER_B
:
2102 return "TRANSCODER_B";
2103 case POWER_DOMAIN_TRANSCODER_C
:
2104 return "TRANSCODER_C";
2105 case POWER_DOMAIN_TRANSCODER_EDP
:
2106 return "TRANSCODER_EDP";
2107 case POWER_DOMAIN_PORT_DDI_A_2_LANES
:
2108 return "PORT_DDI_A_2_LANES";
2109 case POWER_DOMAIN_PORT_DDI_A_4_LANES
:
2110 return "PORT_DDI_A_4_LANES";
2111 case POWER_DOMAIN_PORT_DDI_B_2_LANES
:
2112 return "PORT_DDI_B_2_LANES";
2113 case POWER_DOMAIN_PORT_DDI_B_4_LANES
:
2114 return "PORT_DDI_B_4_LANES";
2115 case POWER_DOMAIN_PORT_DDI_C_2_LANES
:
2116 return "PORT_DDI_C_2_LANES";
2117 case POWER_DOMAIN_PORT_DDI_C_4_LANES
:
2118 return "PORT_DDI_C_4_LANES";
2119 case POWER_DOMAIN_PORT_DDI_D_2_LANES
:
2120 return "PORT_DDI_D_2_LANES";
2121 case POWER_DOMAIN_PORT_DDI_D_4_LANES
:
2122 return "PORT_DDI_D_4_LANES";
2123 case POWER_DOMAIN_PORT_DSI
:
2125 case POWER_DOMAIN_PORT_CRT
:
2127 case POWER_DOMAIN_PORT_OTHER
:
2128 return "PORT_OTHER";
2129 case POWER_DOMAIN_VGA
:
2131 case POWER_DOMAIN_AUDIO
:
2133 case POWER_DOMAIN_INIT
:
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id, connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;

	if (crtc->primary->fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   crtc->primary->fb->base.id, crtc->x, crtc->y,
			   crtc->primary->fb->width, crtc->primary->fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
		   "no");
	if (intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_panel_info(m, &intel_connector->panel);
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
		   "no");
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}
	if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
	    intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_dp_info(m, intel_connector);
	else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
		intel_hdmi_info(m, intel_connector);
	else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
		intel_lvds_info(m, intel_connector);

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
static bool cursor_active(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 state;

	if (IS_845G(dev) || IS_I865G(dev))
		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}

static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev, pipe);
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		int x, y;

		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(crtc->active));
		if (crtc->active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->cursor_addr,
				   yesno(active));
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for the trailing '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);

	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int head, tail, n_entries, n;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	head = pipe_crc->head;
	tail = pipe_crc->tail;
	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
			count / PIPE_CRC_LINE_LEN);
	spin_unlock_irq(&pipe_crc->lock);

	bytes_read = 0;
	n = 0;
	do {
		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
		int ret;

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
				   buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		n++;
	} while (--n_entries);

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->tail = tail;
	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);

	return pipe_crc_sources[source];
}
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(&source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
					    INTEL_PIPE_CRC_ENTRIES_NR,
					    GFP_KERNEL);
		if (!pipe_crc->entries)
			return -ENOMEM;

		spin_lock_irq(&pipe_crc->lock);
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));
		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
	}

	return 0;
}
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
static ssize_t
display_crc_ctl_write(struct file *file, const char __user *ubuf,
		      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};
static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
{
	struct drm_device *dev = m->private;
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/* WM1+ latency values in 0.5us units */
		if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, latency,
			   latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;

	wm_latency_show(m, to_i915(dev)->wm.pri_latency);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;

	wm_latency_show(m, to_i915(dev)->wm.spr_latency);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;

	wm_latency_show(m, to_i915(dev)->wm.cur_latency);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (!HAS_PCH_SPLIT(dev))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (!HAS_PCH_SPLIT(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (!HAS_PCH_SPLIT(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}
static ssize_t
wm_latency_write(struct file *file, const char __user *ubuf,
		 size_t len, loff_t *offp, uint16_t wm[5])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[5] = { 0 };
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
static ssize_t
pri_wm_latency_write(struct file *file, const char __user *ubuf,
		     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;

	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
}

static ssize_t
spr_wm_latency_write(struct file *file, const char __user *ubuf,
		     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;

	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
}

static ssize_t
cur_wm_latency_write(struct file *file, const char __user *ubuf,
		     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;

	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
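/*
 * i915_gem_drop_caches: writing a mask of the DROP_* flags above forces the
 * corresponding GEM housekeeping, e.g. from a shell:
 *
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * (DROP_ALL) idles the GPU, retires requests and unbinds/frees idle objects.
 */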
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		*val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		*val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
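/*
 * i915_forcewake_user: holding this file open grabs a forcewake reference on
 * all GT power wells, keeping the hardware awake so registers can be poked
 * from user space; closing it drops the reference again.
 */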
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_display_info", i915_display_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
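/*
 * Unlike the read-only info nodes above, the entries below are backed by
 * their own file_operations (most of them writable) and therefore have to
 * be created one by one through i915_debugfs_create().
 */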
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
};
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}