/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

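/*
 * i915_capabilities: dumps the device generation, PCH type and the
 * per-platform feature flags reported by INTEL_INFO().
 */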
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}

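/*
 * describe_obj: emit a one-line summary of a GEM object - pin/tiling/global
 * flags, size, cache domains, last seqnos, per-VMA GTT offsets and the ring
 * it was last used on.
 */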
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

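/*
 * Comparator for list_sort(): orders objects by their start offset inside
 * stolen memory so i915_gem_stolen_list_info() prints them in address order.
 */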
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

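/*
 * i915_gem_objects: aggregate view of GEM memory usage - global object
 * counts, bound/active/inactive/unbound totals and a per-client breakdown
 * gathered via per_file_stats().
 */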
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count, stats.total, stats.active,
			   stats.inactive, stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

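/* i915_gem_seqno: print the current hardware seqno for each ring. */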
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

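/* i915_gem_fence_regs: list every fence register and the object backing it. */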
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);
	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

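/*
 * i915_next_seqno: debugfs file exposing the next seqno to be assigned;
 * writing it calls i915_gem_set_seqno() under struct_mutex.
 */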
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq,
					(freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

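/*
 * gen6_drpc_info: decode the RP/RC control registers and the GT core status
 * to report the current render C-state, RC6 residency counters and voltages.
 */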
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));

	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC actived, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
			break;
		}
		seq_putc(m, '\n');
	}
	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_putc(m, '\n');
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

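/* Translate a bit-6 swizzle mode into a human readable string. */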
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_puts(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrstat, psrperf;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "PSR not supported on this platform\n");
		return 0;
	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
		seq_puts(m, "PSR enabled\n");
	} else {
		seq_puts(m, "PSR disabled: ");
		switch (dev_priv->no_psr_reason) {
		case PSR_NO_SOURCE:
			seq_puts(m, "not supported on this platform");
			break;
		case PSR_NO_SINK:
			seq_puts(m, "not supported by panel");
			break;
		case PSR_MODULE_PARAM:
			seq_puts(m, "disabled by flag");
			break;
		case PSR_CRTC_NOT_ACTIVE:
			seq_puts(m, "crtc not active");
			break;
		case PSR_PWR_WELL_ENABLED:
			seq_puts(m, "power well enabled");
			break;
		case PSR_NOT_TILED:
			seq_puts(m, "not tiled");
			break;
		case PSR_SPRITE_ENABLED:
			seq_puts(m, "sprite enabled");
			break;
		case PSR_S3D_ENABLED:
			seq_puts(m, "stereo 3d enabled");
			break;
		case PSR_INTERLACED_ENABLED:
			seq_puts(m, "interlaced enabled");
			break;
		case PSR_HSW_NOT_DDIA:
			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
			break;
		default:
			seq_puts(m, "unknown reason");
			break;
		}
		seq_puts(m, "\n");
		return 0;
	}

	psrstat = I915_READ(EDP_PSR_STATUS_CTL);

	seq_puts(m, "PSR Current State: ");
	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
	case EDP_PSR_STATUS_STATE_IDLE:
		seq_puts(m, "Reset state\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDONACK:
		seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDENT:
		seq_puts(m, "SRD entry\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFOFF:
		seq_puts(m, "Wait for buffer turn off\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFON:
		seq_puts(m, "Wait for buffer turn on\n");
		break;
	case EDP_PSR_STATUS_STATE_AUXACK:
		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDOFFACK:
		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_puts(m, "Link Status: ");
	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
	case EDP_PSR_STATUS_LINK_FULL_OFF:
		seq_puts(m, "Link is fully off\n");
		break;
	case EDP_PSR_STATUS_LINK_FULL_ON:
		seq_puts(m, "Link is fully on\n");
		break;
	case EDP_PSR_STATUS_LINK_STANDBY:
		seq_puts(m, "Link is in standby\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "PSR Entry Count: %u\n",
		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
		   EDP_PSR_STATUS_COUNT_MASK);

	seq_printf(m, "Max Sleep Timer Counter: %u\n",
		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);

	seq_printf(m, "Had AUX error: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));

	seq_printf(m, "Sending AUX: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));

	seq_printf(m, "Sending Idle: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));

	seq_printf(m, "Sending TP2 TP3: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));

	seq_printf(m, "Sending TP1: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));

	seq_printf(m, "Idle Count: %u\n",
		   psrstat & EDP_PSR_STATUS_IDLE_MASK);

	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance Counter: %u\n", psrperf);

	return 0;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}

static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	mutex_lock(&dev_priv->pc8.lock);
	seq_printf(m, "Requirements met: %s\n",
		   yesno(dev_priv->pc8.requirements_met));
	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(dev_priv->pc8.irqs_disabled));
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
	mutex_unlock(&dev_priv->pc8.lock);

	return 0;
}

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)

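/*
 * i915_gem_drop_caches: writing a mask of the DROP_* flags above idles the
 * GPU, retires outstanding requests and/or unbinds inactive VMAs and drops
 * unpinned backing pages.
 */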
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->obj->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list) {
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
		}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}

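/* Read-only seq_file entries registered via drm_debugfs_create_files(). */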
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

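/* Writable debugfs files; each entry pairs a file name with its fops. */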
static struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
};

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

#endif /* CONFIG_DEBUG_FS */