/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <generated/utsrelease.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
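
/*
 * PRINT_FLAG() stringizes each feature-flag member of intel_device_info,
 * so the DEV_INFO_FOR_EACH_FLAG() expansion above becomes one statement
 * per flag, roughly (illustrative, the flag names live in i915_drv.h):
 *
 *	seq_printf(m, "is_mobile: %s\n", yesno(info->is_mobile));
 */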
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
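
/*
 * describe_obj() emits a single line per object; an illustrative (not
 * verbatim) example of the resulting format is:
 *
 *	0xffff88012ab4c000: p X    128KiB 02 02 5 5 0 uncached dirty (pinned x 1) (gtt offset: 00100000, size: 00020000)
 */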
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};
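
/*
 * count_objects() deliberately expands in the caller's scope: it relies on
 * local variables named obj, size, count, mappable_size and mappable_count
 * existing at the call site, which is why i915_gem_object_info() below
 * re-zeroes that whole set before every invocation.
 */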
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->gtt_space) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);

	seq_printf(m, "\n");
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:   %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:     %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:   %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:     %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:        %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:      %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:          %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return;
	}

	if (e->bytes == e->size - 1 || e->err)
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		len = vsnprintf(NULL, 0, f, args);
		if (e->pos + len <= e->start) {
			e->pos += len;
			return;
		}

		/* First vsnprintf needs to fit in full for memmove */
		if (len >= e->size) {
			e->err = -EIO;
			return;
		}
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
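
/*
 * The windowed scheme above lets userspace read the error state from an
 * arbitrary file offset without the kernel keeping state between read()
 * calls: every printf that ends before e->start is measured with a NULL
 * vsnprintf and skipped, and the first one straddling e->start is rendered
 * in full and then shifted down with memmove so that the buffer begins
 * exactly at the requested position.
 */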
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_printf(m, "\n");
		err++;
	}
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	err_printf(m, "%s command stream:\n", ring_str(ring));
	err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
	err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
	err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
	err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
	err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
	err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
	if (INTEL_INFO(dev)->gen >= 4)
		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
	err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
	}
	err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};
static int i915_error_state(struct i915_error_state_file_priv *error_priv,
			    struct drm_i915_error_state_buf *m)
{
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	if (!error) {
		err_printf(m, "no error state collected\n");
		return 0;
	}

	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x :  %08x\n", offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x :  %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		obj = error->ring[i].ctx;
		if (obj) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

	return 0;
}
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv *error_priv;
	unsigned long flags;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	file->private_data = error_priv;

	return 0;
}
*inode
, struct file
*file
)
963 struct i915_error_state_file_priv
*error_priv
= file
->private_data
;
965 if (error_priv
->error
)
966 kref_put(&error_priv
->error
->ref
, i915_error_state_free
);
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret = 0;

	memset(&error_str, 0, sizeof(error_str));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	error_str.buf = kmalloc(error_str.size,
				GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (error_str.buf == NULL) {
		error_str.size = PAGE_SIZE;
		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
	}

	if (error_str.buf == NULL) {
		error_str.size = 128;
		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
	}

	if (error_str.buf == NULL)
		return -ENOMEM;

	error_str.start = *pos;

	ret = i915_error_state(error_priv, &error_str);
	if (ret)
		goto out;

	if (error_str.bytes == 0 && error_str.err) {
		ret = error_str.err;
		goto out;
	}

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	kfree(error_str.buf);
	return ret ?: ret_count;
}
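
/*
 * Note the staged allocation above: first try max(count + 1, PAGE_SIZE)
 * bytes opportunistically (no retries, no warnings), then fall back to a
 * single page, then to a 128 byte buffer before giving up with -ENOMEM,
 * so a huge read() request cannot push the page allocator into heroics.
 */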
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
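
/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations boilerplate for
 * a debugfs file backed by a u64 get/set pair; the last argument is the
 * printf format used when the file is read.  Illustrative usage from a
 * shell (the dri minor number depends on the system):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_next_seqno
 *	0x00000521
 */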
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq,
					(freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_printf(m, "Core Power Down\n");
		else
			seq_printf(m, "on\n");
		break;
	case GEN6_RC3:
		seq_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_printf(m, "RC7\n");
		break;
	default:
		seq_printf(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->ips.renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_printf(m, "\n");
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_printf(m, "\n");
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_printf(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_printf(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
			if (obj->pin_count == 0) {
				ret = i915_gem_object_unbind(obj);
				if (ret)
					goto unlock;
			}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
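
/*
 * i915_gem_drop_caches takes a mask of the DROP_* flags defined above;
 * writing DROP_ALL idles the GPU, retires outstanding requests and evicts
 * every unpinned buffer, which is chiefly useful for shrinker and leak
 * testing.  Illustrative usage (path depends on the dri minor number):
 *
 *	# echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */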
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
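
/*
 * Keeping i915_forcewake_user open holds a forcewake reference for the
 * whole lifetime of the file descriptor, so userspace register access
 * (e.g. the intel-gpu-tools register utilities) sees the GT powered up;
 * the reference is dropped again in ->release when the fd is closed.
 */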
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
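
/*
 * Each entry in i915_debugfs_list becomes a read-only file under
 * /sys/kernel/debug/dri/<minor>/ named after its first field;
 * drm_debugfs_create_files() wires every ->show callback up through the
 * drm_info_node machinery used by the accessors throughout this file.
 */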
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_wedged",
				  &i915_wedged_fops);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_max_freq",
				  &i915_max_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_min_freq",
				  &i915_min_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_cache_sharing",
				  &i915_cache_sharing_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_ring_stop",
				  &i915_ring_stop_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_gem_drop_caches",
				  &i915_drop_caches_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_error_state",
				  &i915_error_state_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_next_seqno",
				  &i915_next_seqno_fops);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */