/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
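
/*
 * Illustrative read of this file from userspace (the debugfs mount point
 * and DRM minor number are assumptions; the values depend on the hardware):
 *
 *   $ cat /sys/kernel/debug/dri/0/i915_capabilities
 *   gen: 7
 *   pch: 2
 *   is_mobile: yes
 *   ...
 */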

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}
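
/*
 * The three helpers above build the short status prefix printed by
 * describe_obj() below: e.g. a user-pinned, X-tiled object with a global
 * GTT mapping shows up as "PXg", while "p  " is a driver-pinned, untiled
 * object with no global mapping.
 */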

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
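
/*
 * Sample of the summary this entry produces (numbers purely illustrative):
 *
 *   461 objects, 305659904 bytes
 *   282 [247] objects, 236684288 [100663296] bytes in gtt
 *     53 [49] active objects, 151916544 [145440768] bytes
 *   ...
 *   Xorg: 139 objects, 128770048 bytes (76505088 active, 52264960 inactive, 0 unbound)
 */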

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(i) {
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IMR(i)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IIR(i)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IER(i)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:\t\t%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:\t%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:\t\t%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:\t\t%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:\t%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:\t\t%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:\t\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:\t\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:\t\t%08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):\t%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
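
/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write boilerplate
 * around the get/set pair above, printing and parsing the value with the
 * "0x%llx\n" format.  Illustrative use (the debugfs paths are an
 * assumption):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_next_seqno
 *   0x00000455
 *   # echo 0x10000 > /sys/kernel/debug/dri/0/i915_next_seqno
 */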

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n",
		   (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq,
					(freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	return ret;
}
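
/*
 * Note on units: the ratio fields decoded above are multiples of
 * GT_FREQUENCY_MULTIPLIER (50 MHz on these parts), so e.g. a CAGF ratio of
 * 18 read out of RPSTAT1 is reported as 18 * 50 = 900 MHz.
 */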

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}
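
/* Worked example: a stored map value of 10 decodes to 1250 - 10 * 25 = 1000 mV. */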

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
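
/*
 * GEN6_DECODE_RC6_VID() (defined in i915_drv.h of this era as
 * ((vids) * 5) + 245) turns the raw 8-bit VIDs packed into rc6vids back
 * into millivolts, e.g. a raw VID of 3 prints as 260mV.
 */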

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC enabled, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
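
/*
 * Each pcode reply packs two ratios: bits 7:0 give the effective CPU
 * frequency in 100 MHz units and bits 15:8 the effective ring frequency.
 * E.g. a reply of 0x1512 at gpu_freq 12 prints as 600, 1800 and 2100 MHz
 * (12 * 50, 0x12 * 100 and 0x15 * 100 respectively).
 */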

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ", ring->name);

		describe_obj(m, ctx->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (IS_GEN8(dev))
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_puts(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	bool enabled = false;

	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));

	enabled = HAS_PSR(dev) &&
		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "Enabled: %s\n", yesno(enabled));

	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);

	return 0;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
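
/*
 * Worked example for the RAPL conversion above: if the energy-status-units
 * field of MSR_RAPL_POWER_UNIT reads 14, each counter tick is
 * 1000000 / (1 << 14) ~= 61 uJ, so a raw MCH_SECP_NRG_STTS count of 1000
 * is reported as roughly 61000 uJ.
 */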

static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	mutex_lock(&dev_priv->pc8.lock);
	seq_printf(m, "Requirements met: %s\n",
		   yesno(dev_priv->pc8.requirements_met));
	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(dev_priv->pc8.irqs_disabled));
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
	mutex_unlock(&dev_priv->pc8.lock);

	return 0;
}

struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
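
/*
 * A formatted entry therefore looks like (illustrative values):
 *
 *        723 1a2b3c4d 2b3c4d5e 3c4d5e6f 4d5e6f70 5e6f7081
 *
 * i.e. 6 fields of 8 chars, 5 separating spaces and a '\n' = 54 bytes.
 */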

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
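
/*
 * CIRC_CNT() comes from <linux/circ_buf.h> and evaluates to
 * ((head) - (tail)) & ((size) - 1), which is also why
 * INTEL_PIPE_CRC_ENTRIES_NR must be a power of two; see the
 * BUILD_BUG_ON_NOT_POWER_OF_2() check in i915_pipe_crc_read() below.
 */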

static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int head, tail, n_entries, n;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	head = pipe_crc->head;
	tail = pipe_crc->tail;
	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
			count / PIPE_CRC_LINE_LEN);
	spin_unlock_irq(&pipe_crc->lock);

	bytes_read = 0;
	n = 0;
	do {
		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
		int ret;

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
				   buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		n++;
	} while (--n_entries);

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->tail = tail;
	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
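
/*
 * Illustrative capture session (the debugfs paths and the exact control
 * syntax are assumptions; the control-file write parser lies beyond this
 * excerpt):
 *
 *   # echo "pipe A pipe" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   # cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   # echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */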

static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, info);
}

static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);

	return pipe_crc_sources[source];
}

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}

static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		tmp |= DC_BALANCE_RESET_VLV;
		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(&source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
					    INTEL_PIPE_CRC_ENTRIES_NR,
					    GFP_KERNEL);
		if (!pipe_crc->entries)
			return -ENOMEM;

		spin_lock_irq(&pipe_crc->lock);
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		intel_wait_for_vblank(dev, pipe);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
	}

	return 0;
}

/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */

static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}

enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}

static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}

static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

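/*
 * Usage sketch (assumes the usual debugfs mount point and DRM minor 0):
 * writing any value injects an error via i915_handle_error() so the reset
 * path can be exercised; reading returns the current reset counter.
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 *   cat /sys/kernel/debug/dri/0/i915_wedged
 */
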
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

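/*
 * Usage sketch (assumptions as above): the value is a per-ring bitmask;
 * non-zero bits make the corresponding rings appear hung so the hangcheck
 * and reset paths can be exercised, and writing 0 restores normal
 * operation.
 *
 *   echo 0x1 > /sys/kernel/debug/dri/0/i915_ring_stop
 *   echo 0x0 > /sys/kernel/debug/dri/0/i915_ring_stop
 */
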
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->obj->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

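/*
 * Usage sketch (assumptions as above): the value is a mask of the DROP_*
 * flags defined above; reading reports DROP_ALL. Dropping everything
 * (0xf == DROP_UNBOUND | DROP_BOUND | DROP_RETIRE | DROP_ACTIVE):
 *
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */
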
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.max_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

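/*
 * Usage sketch (assumptions as above): on GEN6/GEN7 the value is in MHz
 * and is divided by GT_FREQUENCY_MULTIPLIER to get the internal turbo
 * units; on Valleyview it is converted with vlv_freq_opcode() instead.
 * Capping turbo at 800 MHz, assuming that frequency is within the
 * hardware's supported range:
 *
 *   echo 800 > /sys/kernel/debug/dri/0/i915_max_freq
 */
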
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

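/*
 * Usage sketch (assumptions as above): the value programs the snoop-policy
 * field in GEN6_MBCUNIT_SNPCR, which is why only 0-3 are accepted:
 *
 *   echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */
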
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

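/*
 * Usage sketch (assumptions as above): keeping i915_forcewake_user open
 * holds a GT forcewake reference on gen6+, preventing the GT from
 * power-gating while registers are inspected from userspace:
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # grab
 *   ... read registers ...
 *   exec 3<&-                                             # release
 */
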
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}

static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
};

void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

#endif /* CONFIG_DEBUG_FS */