/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

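/*
 * The helpers below reduce an object's pin, tiling and global-GTT state to
 * single characters, which describe_obj() packs into its "%s%s%s" prefix.
 */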
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

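/*
 * i915_gem_active/i915_gem_inactive: walk one of the GGTT VMA lists
 * (selected via node->info_ent->data) and describe_obj() each entry.
 */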
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, exec_list);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, exec_list);

	return a->stolen->start - b->stolen->start;
}

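/*
 * List every object backed by stolen memory, sorted by start offset within
 * the stolen region; exec_list is borrowed as scratch storage for list_sort.
 */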
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->exec_list, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->exec_list, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), exec_list);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->exec_list);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

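/*
 * i915_gem_objects: driver-wide memory accounting. The count_objects()/
 * count_vmas() macros above accumulate into the size/count/mappable_*
 * locals that this function declares.
 */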
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:   %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:     %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:   %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:     %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:        %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:      %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:          %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

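/*
 * i915_error_state: reading returns the last captured GPU hang record;
 * writing anything to the file clears it.
 */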
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

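/*
 * The read/write attributes below each pair a get()/set() callback via
 * DEFINE_SIMPLE_ATTRIBUTE(). From userspace they behave like small text
 * files, e.g. (hypothetical shell usage, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/dri/0/i915_next_seqno
 *   echo 0x1000 > /sys/kernel/debug/dri/0/i915_next_seqno
 */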
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq,
					(freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));

	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

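/* Report whether framebuffer compression is active, and if not, why. */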
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_putc(m, '\n');
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

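/*
 * Dump the detected bit-6 swizzle modes plus the raw DRAM/arbiter registers
 * they were derived from.
 */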
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_puts(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}

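/*
 * Report eDP Panel Self Refresh state: whether PSR is enabled (and if not,
 * why), plus the raw status, link state and performance counters.
 */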
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrstat, psrperf;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "PSR not supported on this platform\n");
	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
		seq_puts(m, "PSR enabled\n");
	} else {
		seq_puts(m, "PSR disabled: ");
		switch (dev_priv->no_psr_reason) {
		case PSR_NO_SOURCE:
			seq_puts(m, "not supported on this platform");
			break;
		case PSR_NO_SINK:
			seq_puts(m, "not supported by panel");
			break;
		case PSR_MODULE_PARAM:
			seq_puts(m, "disabled by flag");
			break;
		case PSR_CRTC_NOT_ACTIVE:
			seq_puts(m, "crtc not active");
			break;
		case PSR_PWR_WELL_ENABLED:
			seq_puts(m, "power well enabled");
			break;
		case PSR_NOT_TILED:
			seq_puts(m, "not tiled");
			break;
		case PSR_SPRITE_ENABLED:
			seq_puts(m, "sprite enabled");
			break;
		case PSR_S3D_ENABLED:
			seq_puts(m, "stereo 3d enabled");
			break;
		case PSR_INTERLACED_ENABLED:
			seq_puts(m, "interlaced enabled");
			break;
		case PSR_HSW_NOT_DDIA:
			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_puts(m, "\n");
		return 0;
	}

	psrstat = I915_READ(EDP_PSR_STATUS_CTL);

	seq_puts(m, "PSR Current State: ");
	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
	case EDP_PSR_STATUS_STATE_IDLE:
		seq_puts(m, "Reset state\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDONACK:
		seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDENT:
		seq_puts(m, "SRD entry\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFOFF:
		seq_puts(m, "Wait for buffer turn off\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFON:
		seq_puts(m, "Wait for buffer turn on\n");
		break;
	case EDP_PSR_STATUS_STATE_AUXACK:
		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDOFFACK:
		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_puts(m, "Link Status: ");
	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
	case EDP_PSR_STATUS_LINK_FULL_OFF:
		seq_puts(m, "Link is fully off\n");
		break;
	case EDP_PSR_STATUS_LINK_FULL_ON:
		seq_puts(m, "Link is fully on\n");
		break;
	case EDP_PSR_STATUS_LINK_STANDBY:
		seq_puts(m, "Link is in standby\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "PSR Entry Count: %u\n",
		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
		   EDP_PSR_STATUS_COUNT_MASK);

	seq_printf(m, "Max Sleep Timer Counter: %u\n",
		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);

	seq_printf(m, "Had AUX error: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));

	seq_printf(m, "Sending AUX: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));

	seq_printf(m, "Sending Idle: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));

	seq_printf(m, "Sending TP2 TP3: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));

	seq_printf(m, "Sending TP1: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));

	seq_printf(m, "Idle Count: %u\n",
		   psrstat & EDP_PSR_STATUS_IDLE_MASK);

	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance Counter: %u\n", psrperf);

	return 0;
}

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

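/*
 * i915_gem_drop_caches: writing a mask of the DROP_* flags below forces the
 * corresponding degree of request retirement and buffer eviction, e.g.
 * (hypothetical shell usage) "echo 0xf > i915_gem_drop_caches" to idle the
 * GPU and drop everything droppable.
 */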
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->obj->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}

static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

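/*
 * Read-only seq_file entries live in i915_debugfs_list above; the writable
 * attribute files below are registered separately, each with its own fops.
 */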
static struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
};

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

#endif /* CONFIG_DEBUG_FS */