/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
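
/*
 * DEV_INFO_FOR_EACH_FLAG is an X-macro: it expands its first argument once
 * per device-info flag, splicing the second argument in as the separator,
 * so a single flag list in the device-info header drives both the struct
 * fields and the printer above. A minimal sketch of the pattern (the flag
 * names here are illustrative, not the real list):
 *
 *	#define FOR_EACH_FLAG(func, sep) func(flag_a) sep func(flag_b)
 *	FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
 *	// expands to: seq_printf(m, "flag_a: %s\n", yesno(info->flag_a)); ...
 */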
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
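
/*
 * Reading a describe_obj() line: the three single-character flags after the
 * object pointer encode pin state ("P" user-pinned, "p" kernel-pinned,
 * " " unpinned), tiling mode (" ", "X" or "Y") and "g" when the object has
 * a global GTT mapping, followed by size, domains, seqnos and cache level.
 */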
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}
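
/*
 * obj_rank_by_stolen() is the list_sort() comparator used below: it orders
 * objects by the start offset of their allocation in stolen memory, so the
 * debugfs dump walks stolen space from lowest to highest address.
 */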
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
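
/*
 * count_objects() and count_vmas() deliberately accumulate into locals
 * (size, count, mappable_size, mappable_count) that must exist at the call
 * site; the do { ... } while (0) wrapper makes each expansion behave as a
 * single statement. Callers reset the accumulators before every use:
 *
 *	size = count = mappable_size = mappable_count = 0;
 *	count_objects(&dev_priv->mm.bound_list, global_list);
 */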
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:    %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:  %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:      %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:    %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:  %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:      %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:         %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:       %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:           %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	/* GNU ?: extension: returns ret when it is non-zero (an error),
	 * otherwise the number of bytes copied to userspace. */
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
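
/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates a complete file_operations
 * (i915_next_seqno_fops here) from the get/set pair: reads format the
 * getter's value with the "0x%llx\n" template, writes parse the user's
 * input and hand it to the setter. The same pattern backs the wedged,
 * ring_stop, drop_caches, min/max freq and cache sharing files below.
 */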
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n",
		   (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq,
					(freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	return 0;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}
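
/*
 * VID-to-voltage conversion: each VID step is worth 25mV below a 1250mV
 * ceiling, so e.g. MAP_TO_MV(10) == 1000 (millivolts).
 */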
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_putc(m, '\n');
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
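
/*
 * The strings above name which physical address bits the hardware XORs
 * into bit 6 of a tiled surface's address (swizzling spreads accesses
 * across DRAM banks/channels). Userspace needs this information to
 * correctly detile buffers through the CPU.
 */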
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
*m
, void *data
)
1550 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1551 struct drm_device
*dev
= node
->minor
->dev
;
1552 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1553 struct intel_ring_buffer
*ring
;
1557 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1560 if (INTEL_INFO(dev
)->gen
== 6)
1561 seq_printf(m
, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE
));
1563 for_each_ring(ring
, dev_priv
, i
) {
1564 seq_printf(m
, "%s\n", ring
->name
);
1565 if (INTEL_INFO(dev
)->gen
== 7)
1566 seq_printf(m
, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring
)));
1567 seq_printf(m
, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring
)));
1568 seq_printf(m
, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring
)));
1569 seq_printf(m
, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring
)));
1571 if (dev_priv
->mm
.aliasing_ppgtt
) {
1572 struct i915_hw_ppgtt
*ppgtt
= dev_priv
->mm
.aliasing_ppgtt
;
1574 seq_puts(m
, "aliasing PPGTT:\n");
1575 seq_printf(m
, "pd gtt offset: 0x%08x\n", ppgtt
->pd_offset
);
1577 seq_printf(m
, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK
));
1578 mutex_unlock(&dev
->struct_mutex
);
1583 static int i915_dpio_info(struct seq_file
*m
, void *data
)
1585 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1586 struct drm_device
*dev
= node
->minor
->dev
;
1587 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1591 if (!IS_VALLEYVIEW(dev
)) {
1592 seq_puts(m
, "unsupported\n");
1596 ret
= mutex_lock_interruptible(&dev_priv
->dpio_lock
);
1600 seq_printf(m
, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL
));
1602 seq_printf(m
, "DPIO_DIV_A: 0x%08x\n",
1603 vlv_dpio_read(dev_priv
, _DPIO_DIV_A
));
1604 seq_printf(m
, "DPIO_DIV_B: 0x%08x\n",
1605 vlv_dpio_read(dev_priv
, _DPIO_DIV_B
));
1607 seq_printf(m
, "DPIO_REFSFR_A: 0x%08x\n",
1608 vlv_dpio_read(dev_priv
, _DPIO_REFSFR_A
));
1609 seq_printf(m
, "DPIO_REFSFR_B: 0x%08x\n",
1610 vlv_dpio_read(dev_priv
, _DPIO_REFSFR_B
));
1612 seq_printf(m
, "DPIO_CORE_CLK_A: 0x%08x\n",
1613 vlv_dpio_read(dev_priv
, _DPIO_CORE_CLK_A
));
1614 seq_printf(m
, "DPIO_CORE_CLK_B: 0x%08x\n",
1615 vlv_dpio_read(dev_priv
, _DPIO_CORE_CLK_B
));
1617 seq_printf(m
, "DPIO_LPF_COEFF_A: 0x%08x\n",
1618 vlv_dpio_read(dev_priv
, _DPIO_LPF_COEFF_A
));
1619 seq_printf(m
, "DPIO_LPF_COEFF_B: 0x%08x\n",
1620 vlv_dpio_read(dev_priv
, _DPIO_LPF_COEFF_B
));
1622 seq_printf(m
, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1623 vlv_dpio_read(dev_priv
, DPIO_FASTCLK_DISABLE
));
1625 mutex_unlock(&dev_priv
->dpio_lock
);
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrstat, psrperf;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "PSR not supported on this platform\n");
		return 0;
	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
		seq_puts(m, "PSR enabled\n");
	} else {
		seq_puts(m, "PSR disabled: ");
		switch (dev_priv->no_psr_reason) {
		case PSR_NO_SOURCE:
			seq_puts(m, "not supported on this platform");
			break;
		case PSR_NO_SINK:
			seq_puts(m, "not supported by panel");
			break;
		case PSR_MODULE_PARAM:
			seq_puts(m, "disabled by flag");
			break;
		case PSR_CRTC_NOT_ACTIVE:
			seq_puts(m, "crtc not active");
			break;
		case PSR_PWR_WELL_ENABLED:
			seq_puts(m, "power well enabled");
			break;
		case PSR_NOT_TILED:
			seq_puts(m, "not tiled");
			break;
		case PSR_SPRITE_ENABLED:
			seq_puts(m, "sprite enabled");
			break;
		case PSR_S3D_ENABLED:
			seq_puts(m, "stereo 3d enabled");
			break;
		case PSR_INTERLACED_ENABLED:
			seq_puts(m, "interlaced enabled");
			break;
		case PSR_HSW_NOT_DDIA:
			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_puts(m, "\n");
		return 0;
	}

	psrstat = I915_READ(EDP_PSR_STATUS_CTL);

	seq_puts(m, "PSR Current State: ");
	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
	case EDP_PSR_STATUS_STATE_IDLE:
		seq_puts(m, "Reset state\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDONACK:
		seq_puts(m, "Wait for TG/Stream to send one frame of data after SRD conditions are met\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDENT:
		seq_puts(m, "SRD entry\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFOFF:
		seq_puts(m, "Wait for buffer turn off\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFON:
		seq_puts(m, "Wait for buffer turn on\n");
		break;
	case EDP_PSR_STATUS_STATE_AUXACK:
		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDOFFACK:
		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_puts(m, "Link Status: ");
	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
	case EDP_PSR_STATUS_LINK_FULL_OFF:
		seq_puts(m, "Link is fully off\n");
		break;
	case EDP_PSR_STATUS_LINK_FULL_ON:
		seq_puts(m, "Link is fully on\n");
		break;
	case EDP_PSR_STATUS_LINK_STANDBY:
		seq_puts(m, "Link is in standby\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "PSR Entry Count: %u\n",
		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
		   EDP_PSR_STATUS_COUNT_MASK);

	seq_printf(m, "Max Sleep Timer Counter: %u\n",
		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);

	seq_printf(m, "Had AUX error: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));

	seq_printf(m, "Sending AUX: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));

	seq_printf(m, "Sending Idle: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));

	seq_printf(m, "Sending TP2 TP3: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));

	seq_printf(m, "Sending TP1: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));

	seq_printf(m, "Idle Count: %u\n",
		   psrstat & EDP_PSR_STATUS_IDLE_MASK);

	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance Counter: %u\n", psrperf);

	return 0;
}
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->obj->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
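
/*
 * The drop_caches control takes a bitmask: writing DROP_ALL (0xf) idles
 * the GPU, retires outstanding requests, unbinds inactive VMAs and then
 * releases the backing pages of unbound, unpinned objects, in that order.
 * Partial masks run only the selected stages.
 */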
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
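
/*
 * Holding i915_forcewake_user open pins the GT awake: open() takes a
 * forcewake reference and release() drops it, so registers in the GT
 * power well stay readable for as long as the file descriptor lives.
 */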
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
};
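
/*
 * Two registration paths feed debugfs: i915_debugfs_list entries are
 * seq_file-based read-only dumps registered via drm_debugfs_create_files(),
 * while i915_debugfs_files entries carry their own file_operations (the
 * writable controls above) and are created one by one through
 * i915_debugfs_create().
 */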
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

#endif /* CONFIG_DEBUG_FS */