/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}

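/*
 * Emit a one-line description of a GEM object: pin flag ('P' user pin,
 * 'p' kernel pin), tiling flag ('X'/'Y'), global-GTT flag ('g'), size,
 * read/write domains, last seqnos and cache level, followed by the
 * per-VMA GTT bindings, stolen offset, mappability and last ring used.
 */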
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_puts(m, " ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

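/* list_sort() comparator: order objects by start offset within stolen memory. */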
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, exec_list);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, exec_list);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->exec_list, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->exec_list, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), exec_list);
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->exec_list);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

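/*
 * count_objects() (and count_vmas() further down) expand in place and
 * accumulate into the caller's local size/count/mappable_size/mappable_count
 * variables.
 */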
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

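/* idr_for_each() callback: accumulate per-client GEM object counts and sizes. */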
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

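	/* Dump the first 256 dwords (1 KiB) of the status page, four per line. */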
	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

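/* A write of any payload to i915_error_state discards the captured error state. */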
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

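/*
 * i915_next_seqno: read back, or override via i915_gem_set_seqno(), the
 * seqno that will be assigned to the next GEM request.
 */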
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
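		/*
		 * CAGF is the current actual GPU frequency ratio; the
		 * mask/shift differ on Haswell, and the ratio is scaled to
		 * MHz below.
		 */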
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq,
					(freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

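/* Decode a voltage ID: VID 0 is 1250 mV and each step subtracts 25 mV. */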
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return ret;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_putc(m, '\n');
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_puts(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrstat, psrperf;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "PSR not supported on this platform\n");
		return 0;
	}

	if (I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
		seq_puts(m, "PSR enabled\n");
	} else {
		seq_puts(m, "PSR disabled: ");
		switch (dev_priv->no_psr_reason) {
		case PSR_NO_SOURCE:
			seq_puts(m, "not supported on this platform");
			break;
		case PSR_NO_SINK:
			seq_puts(m, "not supported by panel");
			break;
		case PSR_MODULE_PARAM:
			seq_puts(m, "disabled by flag");
			break;
		case PSR_CRTC_NOT_ACTIVE:
			seq_puts(m, "crtc not active");
			break;
		case PSR_PWR_WELL_ENABLED:
			seq_puts(m, "power well enabled");
			break;
		case PSR_NOT_TILED:
			seq_puts(m, "not tiled");
			break;
		case PSR_SPRITE_ENABLED:
			seq_puts(m, "sprite enabled");
			break;
		case PSR_S3D_ENABLED:
			seq_puts(m, "stereo 3d enabled");
			break;
		case PSR_INTERLACED_ENABLED:
			seq_puts(m, "interlaced enabled");
			break;
		case PSR_HSW_NOT_DDIA:
			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_puts(m, "\n");
		return 0;
	}

	psrstat = I915_READ(EDP_PSR_STATUS_CTL);

	seq_puts(m, "PSR Current State: ");
	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
	case EDP_PSR_STATUS_STATE_IDLE:
		seq_puts(m, "Reset state\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDONACK:
		seq_puts(m, "Wait for TG/Stream to send one frame of data after SRD conditions are met\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDENT:
		seq_puts(m, "SRD entry\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFOFF:
		seq_puts(m, "Wait for buffer turn off\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFON:
		seq_puts(m, "Wait for buffer turn on\n");
		break;
	case EDP_PSR_STATUS_STATE_AUXACK:
		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDOFFACK:
		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_puts(m, "Link Status: ");
	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
	case EDP_PSR_STATUS_LINK_FULL_OFF:
		seq_puts(m, "Link is fully off\n");
		break;
	case EDP_PSR_STATUS_LINK_FULL_ON:
		seq_puts(m, "Link is fully on\n");
		break;
	case EDP_PSR_STATUS_LINK_STANDBY:
		seq_puts(m, "Link is in standby\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "PSR Entry Count: %u\n",
		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
		   EDP_PSR_STATUS_COUNT_MASK);

	seq_printf(m, "Max Sleep Timer Counter: %u\n",
		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);

	seq_printf(m, "Had AUX error: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));

	seq_printf(m, "Sending AUX: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));

	seq_printf(m, "Sending Idle: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));

	seq_printf(m, "Sending TP2 TP3: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));

	seq_printf(m, "Sending TP1: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));

	seq_printf(m, "Idle Count: %u\n",
		   psrstat & EDP_PSR_STATUS_IDLE_MASK);

	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance Counter: %u\n", psrperf);

	return 0;
}

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

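/*
 * Writing to i915_wedged injects a GPU error: the value written is passed
 * straight to i915_handle_error() as its 'wedged' argument.
 */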
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

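/* Bit flags accepted by the i915_drop_caches debugfs file; DROP_ALL combines them. */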
1830 #define DROP_UNBOUND 0x1
1831 #define DROP_BOUND 0x2
1832 #define DROP_RETIRE 0x4
1833 #define DROP_ACTIVE 0x8
1834 #define DROP_ALL (DROP_UNBOUND | \
1835 DROP_BOUND | \
1836 DROP_RETIRE | \
1837 DROP_ACTIVE)
1838 static int
1839 i915_drop_caches_get(void *data, u64 *val)
1840 {
1841 *val = DROP_ALL;
1842
1843 return 0;
1844 }
1845
1846 static int
1847 i915_drop_caches_set(void *data, u64 val)
1848 {
1849 struct drm_device *dev = data;
1850 struct drm_i915_private *dev_priv = dev->dev_private;
1851 struct drm_i915_gem_object *obj, *next;
1852 struct i915_address_space *vm;
1853 struct i915_vma *vma, *x;
1854 int ret;
1855
1856 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
1857
1858 /* No need to check and wait for gpu resets, only libdrm auto-restarts
1859 * on ioctls on -EAGAIN. */
1860 ret = mutex_lock_interruptible(&dev->struct_mutex);
1861 if (ret)
1862 return ret;
1863
1864 if (val & DROP_ACTIVE) {
1865 ret = i915_gpu_idle(dev);
1866 if (ret)
1867 goto unlock;
1868 }
1869
1870 if (val & (DROP_RETIRE | DROP_ACTIVE))
1871 i915_gem_retire_requests(dev);
1872
1873 if (val & DROP_BOUND) {
1874 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
1875 list_for_each_entry_safe(vma, x, &vm->inactive_list,
1876 mm_list) {
1877 if (vma->obj->pin_count)
1878 continue;
1879
1880 ret = i915_vma_unbind(vma);
1881 if (ret)
1882 goto unlock;
1883 }
1884 }
1885 }
1886
1887 if (val & DROP_UNBOUND) {
1888 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1889 global_list)
1890 if (obj->pages_pin_count == 0) {
1891 ret = i915_gem_object_put_pages(obj);
1892 if (ret)
1893 goto unlock;
1894 }
1895 }
1896
1897 unlock:
1898 mutex_unlock(&dev->struct_mutex);
1899
1900 return ret;
1901 }
1902
1903 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
1904 i915_drop_caches_get, i915_drop_caches_set,
1905 "0x%08llx\n");
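/*
 * Usage sketch (path illustrative): the DROP_* bits above combine, so
 * writing DROP_ALL idles the GPU, retires outstanding requests, unbinds
 * idle VMAs and drops unpinned backing pages in a single call:
 *
 *   # echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */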
1906
1907 static int
1908 i915_max_freq_get(void *data, u64 *val)
1909 {
1910 struct drm_device *dev = data;
1911 	struct drm_i915_private *dev_priv = dev->dev_private;
1912 int ret;
1913
1914 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1915 return -ENODEV;
1916
1917 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1918 if (ret)
1919 return ret;
1920
1921 if (IS_VALLEYVIEW(dev))
1922 *val = vlv_gpu_freq(dev_priv->mem_freq,
1923 dev_priv->rps.max_delay);
1924 else
1925 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
1926 mutex_unlock(&dev_priv->rps.hw_lock);
1927
1928 return 0;
1929 }
1930
1931 static int
1932 i915_max_freq_set(void *data, u64 val)
1933 {
1934 struct drm_device *dev = data;
1935 struct drm_i915_private *dev_priv = dev->dev_private;
1936 int ret;
1937
1938 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1939 return -ENODEV;
1940
1941 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
1942
1943 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1944 if (ret)
1945 return ret;
1946
1947 /*
1948 * Turbo will still be enabled, but won't go above the set value.
1949 */
1950 if (IS_VALLEYVIEW(dev)) {
1951 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1952 dev_priv->rps.max_delay = val;
1953 		valleyview_set_rps(dev, val);
1954 } else {
1955 do_div(val, GT_FREQUENCY_MULTIPLIER);
1956 dev_priv->rps.max_delay = val;
1957 gen6_set_rps(dev, val);
1958 }
1959
1960 mutex_unlock(&dev_priv->rps.hw_lock);
1961
1962 return 0;
1963 }
1964
1965 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
1966 i915_max_freq_get, i915_max_freq_set,
1967 "%llu\n");
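/*
 * Usage sketch (path and value illustrative): on gen6/gen7 the written
 * value is taken in MHz and divided by GT_FREQUENCY_MULTIPLIER before
 * being handed to gen6_set_rps(), so capping turbo at 900 MHz would be:
 *
 *   # echo 900 > /sys/kernel/debug/dri/0/i915_max_freq
 *
 * i915_min_freq below is the mirror image for the lower bound.
 */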
1968
1969 static int
1970 i915_min_freq_get(void *data, u64 *val)
1971 {
1972 struct drm_device *dev = data;
1973 	struct drm_i915_private *dev_priv = dev->dev_private;
1974 int ret;
1975
1976 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1977 return -ENODEV;
1978
1979 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1980 if (ret)
1981 return ret;
1982
1983 if (IS_VALLEYVIEW(dev))
1984 *val = vlv_gpu_freq(dev_priv->mem_freq,
1985 dev_priv->rps.min_delay);
1986 else
1987 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
1988 mutex_unlock(&dev_priv->rps.hw_lock);
1989
1990 return 0;
1991 }
1992
1993 static int
1994 i915_min_freq_set(void *data, u64 val)
1995 {
1996 struct drm_device *dev = data;
1997 struct drm_i915_private *dev_priv = dev->dev_private;
1998 int ret;
1999
2000 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2001 return -ENODEV;
2002
2003 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
2004
2005 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2006 if (ret)
2007 return ret;
2008
2009 /*
2010 * Turbo will still be enabled, but won't go below the set value.
2011 */
2012 if (IS_VALLEYVIEW(dev)) {
2013 val = vlv_freq_opcode(dev_priv->mem_freq, val);
2014 dev_priv->rps.min_delay = val;
2015 valleyview_set_rps(dev, val);
2016 } else {
2017 do_div(val, GT_FREQUENCY_MULTIPLIER);
2018 dev_priv->rps.min_delay = val;
2019 gen6_set_rps(dev, val);
2020 }
2021 mutex_unlock(&dev_priv->rps.hw_lock);
2022
2023 return 0;
2024 }
2025
2026 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
2027 i915_min_freq_get, i915_min_freq_set,
2028 "%llu\n");
2029
2030 static int
2031 i915_cache_sharing_get(void *data, u64 *val)
2032 {
2033 struct drm_device *dev = data;
2034 	struct drm_i915_private *dev_priv = dev->dev_private;
2035 u32 snpcr;
2036 int ret;
2037
2038 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2039 return -ENODEV;
2040
2041 ret = mutex_lock_interruptible(&dev->struct_mutex);
2042 if (ret)
2043 return ret;
2044
2045 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2046 	mutex_unlock(&dev->struct_mutex);
2047
2048 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
2049
2050 return 0;
2051 }
2052
2053 static int
2054 i915_cache_sharing_set(void *data, u64 val)
2055 {
2056 struct drm_device *dev = data;
2057 struct drm_i915_private *dev_priv = dev->dev_private;
2058 u32 snpcr;
2059
2060 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2061 return -ENODEV;
2062
2063 if (val > 3)
2064 return -EINVAL;
2065
2066 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
2067
2068 /* Update the cache sharing policy here as well */
2069 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2070 snpcr &= ~GEN6_MBC_SNPCR_MASK;
2071 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
2072 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
2073
2074 return 0;
2075 }
2076
2077 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2078 i915_cache_sharing_get, i915_cache_sharing_set,
2079 "%llu\n");
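/*
 * Usage sketch (path illustrative): the written value becomes the raw
 * GEN6_MBC_SNPCR snoop policy field, and the set hook rejects anything
 * above 3:
 *
 *   # echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */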
2080
2081 /* As the drm_debugfs_init() routines are called before dev->dev_private is
2082 * allocated, we need to hook into the minor for release. */
2083 static int
2084 drm_add_fake_info_node(struct drm_minor *minor,
2085 struct dentry *ent,
2086 const void *key)
2087 {
2088 struct drm_info_node *node;
2089
2090 	node = kmalloc(sizeof(*node), GFP_KERNEL);
2091 if (node == NULL) {
2092 debugfs_remove(ent);
2093 return -ENOMEM;
2094 }
2095
2096 node->minor = minor;
2097 node->dent = ent;
2098 node->info_ent = (void *) key;
2099
2100 mutex_lock(&minor->debugfs_lock);
2101 list_add(&node->list, &minor->debugfs_list);
2102 mutex_unlock(&minor->debugfs_lock);
2103
2104 return 0;
2105 }
2106
2107 static int i915_forcewake_open(struct inode *inode, struct file *file)
2108 {
2109 struct drm_device *dev = inode->i_private;
2110 struct drm_i915_private *dev_priv = dev->dev_private;
2111
2112 if (INTEL_INFO(dev)->gen < 6)
2113 return 0;
2114
2115 gen6_gt_force_wake_get(dev_priv);
2116
2117 return 0;
2118 }
2119
2120 static int i915_forcewake_release(struct inode *inode, struct file *file)
2121 {
2122 struct drm_device *dev = inode->i_private;
2123 struct drm_i915_private *dev_priv = dev->dev_private;
2124
2125 if (INTEL_INFO(dev)->gen < 6)
2126 return 0;
2127
2128 gen6_gt_force_wake_put(dev_priv);
2129
2130 return 0;
2131 }
2132
2133 static const struct file_operations i915_forcewake_fops = {
2134 .owner = THIS_MODULE,
2135 .open = i915_forcewake_open,
2136 .release = i915_forcewake_release,
2137 };
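/*
 * Usage sketch (path illustrative): the open/release hooks mean
 * forcewake is held for exactly as long as a descriptor on this file
 * stays open, so a shell can keep the GT awake across a series of
 * register reads with:
 *
 *   # exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *   ... poke registers ...
 *   # exec 3<&-
 */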
2138
2139 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
2140 {
2141 struct drm_device *dev = minor->dev;
2142 struct dentry *ent;
2143
2144 ent = debugfs_create_file("i915_forcewake_user",
2145 S_IRUSR,
2146 root, dev,
2147 &i915_forcewake_fops);
2148 if (IS_ERR(ent))
2149 return PTR_ERR(ent);
2150
2151 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
2152 }
2153
2154 static int i915_debugfs_create(struct dentry *root,
2155 struct drm_minor *minor,
2156 const char *name,
2157 const struct file_operations *fops)
2158 {
2159 struct drm_device *dev = minor->dev;
2160 struct dentry *ent;
2161
2162 ent = debugfs_create_file(name,
2163 S_IRUGO | S_IWUSR,
2164 root, dev,
2165 fops);
2166 if (IS_ERR(ent))
2167 return PTR_ERR(ent);
2168
2169 return drm_add_fake_info_node(minor, ent, fops);
2170 }
2171
2172 static struct drm_info_list i915_debugfs_list[] = {
2173 {"i915_capabilities", i915_capabilities, 0},
2174 {"i915_gem_objects", i915_gem_object_info, 0},
2175 {"i915_gem_gtt", i915_gem_gtt_info, 0},
2176 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
2177 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
2178 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
2179 	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
2180 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2181 {"i915_gem_request", i915_gem_request_info, 0},
2182 {"i915_gem_seqno", i915_gem_seqno_info, 0},
2183 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2184 {"i915_gem_interrupt", i915_interrupt_info, 0},
2185 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
2186 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
2187 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
2188 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
2189 {"i915_rstdby_delays", i915_rstdby_delays, 0},
2190 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
2191 {"i915_delayfreq_table", i915_delayfreq_table, 0},
2192 {"i915_inttoext_table", i915_inttoext_table, 0},
2193 {"i915_drpc_info", i915_drpc_info, 0},
2194 {"i915_emon_status", i915_emon_status, 0},
2195 {"i915_ring_freq_table", i915_ring_freq_table, 0},
2196 {"i915_gfxec", i915_gfxec, 0},
2197 {"i915_fbc_status", i915_fbc_status, 0},
2198 {"i915_ips_status", i915_ips_status, 0},
2199 {"i915_sr_status", i915_sr_status, 0},
2200 {"i915_opregion", i915_opregion, 0},
2201 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2202 {"i915_context_status", i915_context_status, 0},
2203 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
2204 {"i915_swizzle_info", i915_swizzle_info, 0},
2205 {"i915_ppgtt_info", i915_ppgtt_info, 0},
2206 {"i915_dpio", i915_dpio_info, 0},
2207 {"i915_llc", i915_llc, 0},
2208 {"i915_edp_psr_status", i915_edp_psr_status, 0},
2209 };
2210 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
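/*
 * The drm_info_list entries above are read-only seq_file dumps and are
 * registered in bulk through drm_debugfs_create_files(); the entries in
 * i915_debugfs_files below are writable attributes with their own fops
 * and are created (and torn down) one by one.
 */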
2211
2212 static struct i915_debugfs_files {
2213 const char *name;
2214 const struct file_operations *fops;
2215 } i915_debugfs_files[] = {
2216 {"i915_wedged", &i915_wedged_fops},
2217 {"i915_max_freq", &i915_max_freq_fops},
2218 {"i915_min_freq", &i915_min_freq_fops},
2219 {"i915_cache_sharing", &i915_cache_sharing_fops},
2220 {"i915_ring_stop", &i915_ring_stop_fops},
2221 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2222 {"i915_error_state", &i915_error_state_fops},
2223 {"i915_next_seqno", &i915_next_seqno_fops},
2224 };
2225
2226 int i915_debugfs_init(struct drm_minor *minor)
2227 {
2228 int ret, i;
2229
2230 ret = i915_forcewake_create(minor->debugfs_root, minor);
2231 if (ret)
2232 return ret;
2233
2234 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2235 ret = i915_debugfs_create(minor->debugfs_root, minor,
2236 i915_debugfs_files[i].name,
2237 i915_debugfs_files[i].fops);
2238 if (ret)
2239 return ret;
2240 }
2241
2242 return drm_debugfs_create_files(i915_debugfs_list,
2243 I915_DEBUGFS_ENTRIES,
2244 minor->debugfs_root, minor);
2245 }
2246
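/*
 * drm_debugfs_remove_files() matches nodes by their info_ent pointer,
 * and drm_add_fake_info_node() stored the fops pointer as that key;
 * hence the casts below that hand the fops straight back in.
 */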
2247 void i915_debugfs_cleanup(struct drm_minor *minor)
2248 {
2249 int i;
2250
2251 drm_debugfs_remove_files(i915_debugfs_list,
2252 I915_DEBUGFS_ENTRIES, minor);
2253 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
2254 1, minor);
2255 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2256 struct drm_info_list *info_list =
2257 (struct drm_info_list *) i915_debugfs_files[i].fops;
2258
2259 drm_debugfs_remove_files(info_list, 1, minor);
2260 }
2261 }
2262
2263 #endif /* CONFIG_DEBUG_FS */