drm/i915: Move flags describing VMA mappings into the VMA
drivers/gpu/drm/i915/i915_debugfs.c
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
37 #include <drm/drmP.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
42
43 enum {
44 ACTIVE_LIST,
45 INACTIVE_LIST,
46 PINNED_LIST,
47 };
48
49 static const char *yesno(int v)
50 {
51 return v ? "yes" : "no";
52 }
53
54 /* As the drm_debugfs_init() routines are called before dev->dev_private is
55 * allocated, we need to hook into the minor for release. */
56 static int
57 drm_add_fake_info_node(struct drm_minor *minor,
58 struct dentry *ent,
59 const void *key)
60 {
61 struct drm_info_node *node;
62
63 node = kmalloc(sizeof(*node), GFP_KERNEL);
64 if (node == NULL) {
65 debugfs_remove(ent);
66 return -ENOMEM;
67 }
68
69 node->minor = minor;
70 node->dent = ent;
71 node->info_ent = (void *) key;
72
73 mutex_lock(&minor->debugfs_lock);
74 list_add(&node->list, &minor->debugfs_list);
75 mutex_unlock(&minor->debugfs_lock);
76
77 return 0;
78 }
79
80 static int i915_capabilities(struct seq_file *m, void *data)
81 {
82 struct drm_info_node *node = m->private;
83 struct drm_device *dev = node->minor->dev;
84 const struct intel_device_info *info = INTEL_INFO(dev);
85
86 seq_printf(m, "gen: %d\n", info->gen);
87 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
88 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
89 #define SEP_SEMICOLON ;
90 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
91 #undef PRINT_FLAG
92 #undef SEP_SEMICOLON
93
94 return 0;
95 }
96
97 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
98 {
99 if (obj->user_pin_count > 0)
100 return "P";
101 else if (i915_gem_obj_is_pinned(obj))
102 return "p";
103 else
104 return " ";
105 }
106
107 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
108 {
109 switch (obj->tiling_mode) {
110 default:
111 case I915_TILING_NONE: return " ";
112 case I915_TILING_X: return "X";
113 case I915_TILING_Y: return "Y";
114 }
115 }
116
117 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
118 {
119 return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
120 }
121
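/*
 * Print a one-line summary of a GEM object: pin/tiling/global flags,
 * size, read/write domains, last read/write/fence seqnos, cache level,
 * plus per-VMA GTT/PPGTT offsets and other status annotations.
 */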
122 static void
123 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
124 {
125 struct i915_vma *vma;
126 int pin_count = 0;
127
128 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
129 &obj->base,
130 get_pin_flag(obj),
131 get_tiling_flag(obj),
132 get_global_flag(obj),
133 obj->base.size / 1024,
134 obj->base.read_domains,
135 obj->base.write_domain,
136 obj->last_read_seqno,
137 obj->last_write_seqno,
138 obj->last_fenced_seqno,
139 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
140 obj->dirty ? " dirty" : "",
141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
142 if (obj->base.name)
143 seq_printf(m, " (name: %d)", obj->base.name);
144 list_for_each_entry(vma, &obj->vma_list, vma_link)
145 if (vma->pin_count > 0)
146 pin_count++;
147 seq_printf(m, " (pinned x %d)", pin_count);
148 if (obj->pin_display)
149 seq_printf(m, " (display)");
150 if (obj->fence_reg != I915_FENCE_REG_NONE)
151 seq_printf(m, " (fence: %d)", obj->fence_reg);
152 list_for_each_entry(vma, &obj->vma_list, vma_link) {
153 if (!i915_is_ggtt(vma->vm))
154 seq_puts(m, " (pp");
155 else
156 seq_puts(m, " (g");
157 seq_printf(m, "gtt offset: %08lx, size: %08lx)",
158 vma->node.start, vma->node.size);
159 }
160 if (obj->stolen)
161 seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
162 if (obj->pin_mappable || obj->fault_mappable) {
163 char s[3], *t = s;
164 if (obj->pin_mappable)
165 *t++ = 'p';
166 if (obj->fault_mappable)
167 *t++ = 'f';
168 *t = '\0';
169 seq_printf(m, " (%s mappable)", s);
170 }
171 if (obj->ring != NULL)
172 seq_printf(m, " (%s)", obj->ring->name);
173 if (obj->frontbuffer_bits)
174 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
175 }
176
177 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
178 {
179 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
180 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
181 seq_putc(m, ' ');
182 }
183
184 static int i915_gem_object_list_info(struct seq_file *m, void *data)
185 {
186 struct drm_info_node *node = m->private;
187 uintptr_t list = (uintptr_t) node->info_ent->data;
188 struct list_head *head;
189 struct drm_device *dev = node->minor->dev;
190 struct drm_i915_private *dev_priv = dev->dev_private;
191 struct i915_address_space *vm = &dev_priv->gtt.base;
192 struct i915_vma *vma;
193 size_t total_obj_size, total_gtt_size;
194 int count, ret;
195
196 ret = mutex_lock_interruptible(&dev->struct_mutex);
197 if (ret)
198 return ret;
199
200 /* FIXME: the user of this interface might want more than just GGTT */
201 switch (list) {
202 case ACTIVE_LIST:
203 seq_puts(m, "Active:\n");
204 head = &vm->active_list;
205 break;
206 case INACTIVE_LIST:
207 seq_puts(m, "Inactive:\n");
208 head = &vm->inactive_list;
209 break;
210 default:
211 mutex_unlock(&dev->struct_mutex);
212 return -EINVAL;
213 }
214
215 total_obj_size = total_gtt_size = count = 0;
216 list_for_each_entry(vma, head, mm_list) {
217 seq_printf(m, " ");
218 describe_obj(m, vma->obj);
219 seq_printf(m, "\n");
220 total_obj_size += vma->obj->base.size;
221 total_gtt_size += vma->node.size;
222 count++;
223 }
224 mutex_unlock(&dev->struct_mutex);
225
226 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
227 count, total_obj_size, total_gtt_size);
228 return 0;
229 }
230
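/* list_sort() comparator: order objects by the start of their stolen-memory allocation. */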
231 static int obj_rank_by_stolen(void *priv,
232 struct list_head *A, struct list_head *B)
233 {
234 struct drm_i915_gem_object *a =
235 container_of(A, struct drm_i915_gem_object, obj_exec_link);
236 struct drm_i915_gem_object *b =
237 container_of(B, struct drm_i915_gem_object, obj_exec_link);
238
239 return a->stolen->start - b->stolen->start;
240 }
241
242 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
243 {
244 struct drm_info_node *node = m->private;
245 struct drm_device *dev = node->minor->dev;
246 struct drm_i915_private *dev_priv = dev->dev_private;
247 struct drm_i915_gem_object *obj;
248 size_t total_obj_size, total_gtt_size;
249 LIST_HEAD(stolen);
250 int count, ret;
251
252 ret = mutex_lock_interruptible(&dev->struct_mutex);
253 if (ret)
254 return ret;
255
256 total_obj_size = total_gtt_size = count = 0;
257 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
258 if (obj->stolen == NULL)
259 continue;
260
261 list_add(&obj->obj_exec_link, &stolen);
262
263 total_obj_size += obj->base.size;
264 total_gtt_size += i915_gem_obj_ggtt_size(obj);
265 count++;
266 }
267 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
268 if (obj->stolen == NULL)
269 continue;
270
271 list_add(&obj->obj_exec_link, &stolen);
272
273 total_obj_size += obj->base.size;
274 count++;
275 }
276 list_sort(NULL, &stolen, obj_rank_by_stolen);
277 seq_puts(m, "Stolen:\n");
278 while (!list_empty(&stolen)) {
279 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
280 seq_puts(m, " ");
281 describe_obj(m, obj);
282 seq_putc(m, '\n');
283 list_del_init(&obj->obj_exec_link);
284 }
285 mutex_unlock(&dev->struct_mutex);
286
287 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
288 count, total_obj_size, total_gtt_size);
289 return 0;
290 }
291
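/*
 * Walk a list of objects and accumulate the caller's size/count totals,
 * tracking the map-and-fenceable subset separately.
 */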
292 #define count_objects(list, member) do { \
293 list_for_each_entry(obj, list, member) { \
294 size += i915_gem_obj_ggtt_size(obj); \
295 ++count; \
296 if (obj->map_and_fenceable) { \
297 mappable_size += i915_gem_obj_ggtt_size(obj); \
298 ++mappable_count; \
299 } \
300 } \
301 } while (0)
302
303 struct file_stats {
304 struct drm_i915_file_private *file_priv;
305 int count;
306 size_t total, unbound;
307 size_t global, shared;
308 size_t active, inactive;
309 };
310
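/*
 * idr_for_each() callback: accumulate per-client memory statistics for
 * one object, classifying it as global (GGTT) or per-PPGTT and as
 * active/inactive/unbound based on where its VMAs are bound.
 */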
311 static int per_file_stats(int id, void *ptr, void *data)
312 {
313 struct drm_i915_gem_object *obj = ptr;
314 struct file_stats *stats = data;
315 struct i915_vma *vma;
316
317 stats->count++;
318 stats->total += obj->base.size;
319
320 if (obj->base.name || obj->base.dma_buf)
321 stats->shared += obj->base.size;
322
323 if (USES_FULL_PPGTT(obj->base.dev)) {
324 list_for_each_entry(vma, &obj->vma_list, vma_link) {
325 struct i915_hw_ppgtt *ppgtt;
326
327 if (!drm_mm_node_allocated(&vma->node))
328 continue;
329
330 if (i915_is_ggtt(vma->vm)) {
331 stats->global += obj->base.size;
332 continue;
333 }
334
335 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
336 if (ppgtt->file_priv != stats->file_priv)
337 continue;
338
339 if (obj->ring) /* XXX per-vma statistic */
340 stats->active += obj->base.size;
341 else
342 stats->inactive += obj->base.size;
343
344 return 0;
345 }
346 } else {
347 if (i915_gem_obj_ggtt_bound(obj)) {
348 stats->global += obj->base.size;
349 if (obj->ring)
350 stats->active += obj->base.size;
351 else
352 stats->inactive += obj->base.size;
353 return 0;
354 }
355 }
356
357 if (!list_empty(&obj->global_list))
358 stats->unbound += obj->base.size;
359
360 return 0;
361 }
362
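/* As count_objects(), but walks a list of VMAs and accounts their backing objects. */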
363 #define count_vmas(list, member) do { \
364 list_for_each_entry(vma, list, member) { \
365 size += i915_gem_obj_ggtt_size(vma->obj); \
366 ++count; \
367 if (vma->obj->map_and_fenceable) { \
368 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
369 ++mappable_count; \
370 } \
371 } \
372 } while (0)
373
374 static int i915_gem_object_info(struct seq_file *m, void* data)
375 {
376 struct drm_info_node *node = m->private;
377 struct drm_device *dev = node->minor->dev;
378 struct drm_i915_private *dev_priv = dev->dev_private;
379 u32 count, mappable_count, purgeable_count;
380 size_t size, mappable_size, purgeable_size;
381 struct drm_i915_gem_object *obj;
382 struct i915_address_space *vm = &dev_priv->gtt.base;
383 struct drm_file *file;
384 struct i915_vma *vma;
385 int ret;
386
387 ret = mutex_lock_interruptible(&dev->struct_mutex);
388 if (ret)
389 return ret;
390
391 seq_printf(m, "%u objects, %zu bytes\n",
392 dev_priv->mm.object_count,
393 dev_priv->mm.object_memory);
394
395 size = count = mappable_size = mappable_count = 0;
396 count_objects(&dev_priv->mm.bound_list, global_list);
397 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
398 count, mappable_count, size, mappable_size);
399
400 size = count = mappable_size = mappable_count = 0;
401 count_vmas(&vm->active_list, mm_list);
402 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
403 count, mappable_count, size, mappable_size);
404
405 size = count = mappable_size = mappable_count = 0;
406 count_vmas(&vm->inactive_list, mm_list);
407 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
408 count, mappable_count, size, mappable_size);
409
410 size = count = purgeable_size = purgeable_count = 0;
411 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
412 size += obj->base.size, ++count;
413 if (obj->madv == I915_MADV_DONTNEED)
414 purgeable_size += obj->base.size, ++purgeable_count;
415 }
416 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
417
418 size = count = mappable_size = mappable_count = 0;
419 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
420 if (obj->fault_mappable) {
421 size += i915_gem_obj_ggtt_size(obj);
422 ++count;
423 }
424 if (obj->pin_mappable) {
425 mappable_size += i915_gem_obj_ggtt_size(obj);
426 ++mappable_count;
427 }
428 if (obj->madv == I915_MADV_DONTNEED) {
429 purgeable_size += obj->base.size;
430 ++purgeable_count;
431 }
432 }
433 seq_printf(m, "%u purgeable objects, %zu bytes\n",
434 purgeable_count, purgeable_size);
435 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
436 mappable_count, mappable_size);
437 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
438 count, size);
439
440 seq_printf(m, "%zu [%lu] gtt total\n",
441 dev_priv->gtt.base.total,
442 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
443
444 seq_putc(m, '\n');
445 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
446 struct file_stats stats;
447 struct task_struct *task;
448
449 memset(&stats, 0, sizeof(stats));
450 stats.file_priv = file->driver_priv;
451 spin_lock(&file->table_lock);
452 idr_for_each(&file->object_idr, per_file_stats, &stats);
453 spin_unlock(&file->table_lock);
454 /*
455 * Although we have a valid reference on file->pid, that does
456 * not guarantee that the task_struct who called get_pid() is
457 * still alive (e.g. get_pid(current) => fork() => exit()).
458 * Therefore, we need to protect this ->comm access using RCU.
459 */
460 rcu_read_lock();
461 task = pid_task(file->pid, PIDTYPE_PID);
462 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
463 task ? task->comm : "<unknown>",
464 stats.count,
465 stats.total,
466 stats.active,
467 stats.inactive,
468 stats.global,
469 stats.shared,
470 stats.unbound);
471 rcu_read_unlock();
472 }
473
474 mutex_unlock(&dev->struct_mutex);
475
476 return 0;
477 }
478
479 static int i915_gem_gtt_info(struct seq_file *m, void *data)
480 {
481 struct drm_info_node *node = m->private;
482 struct drm_device *dev = node->minor->dev;
483 uintptr_t list = (uintptr_t) node->info_ent->data;
484 struct drm_i915_private *dev_priv = dev->dev_private;
485 struct drm_i915_gem_object *obj;
486 size_t total_obj_size, total_gtt_size;
487 int count, ret;
488
489 ret = mutex_lock_interruptible(&dev->struct_mutex);
490 if (ret)
491 return ret;
492
493 total_obj_size = total_gtt_size = count = 0;
494 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
495 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
496 continue;
497
498 seq_puts(m, " ");
499 describe_obj(m, obj);
500 seq_putc(m, '\n');
501 total_obj_size += obj->base.size;
502 total_gtt_size += i915_gem_obj_ggtt_size(obj);
503 count++;
504 }
505
506 mutex_unlock(&dev->struct_mutex);
507
508 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
509 count, total_obj_size, total_gtt_size);
510
511 return 0;
512 }
513
514 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
515 {
516 struct drm_info_node *node = m->private;
517 struct drm_device *dev = node->minor->dev;
518 struct drm_i915_private *dev_priv = dev->dev_private;
519 struct intel_crtc *crtc;
520 int ret;
521
522 ret = mutex_lock_interruptible(&dev->struct_mutex);
523 if (ret)
524 return ret;
525
526 for_each_intel_crtc(dev, crtc) {
527 const char pipe = pipe_name(crtc->pipe);
528 const char plane = plane_name(crtc->plane);
529 struct intel_unpin_work *work;
530
531 spin_lock_irq(&dev->event_lock);
532 work = crtc->unpin_work;
533 if (work == NULL) {
534 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
535 pipe, plane);
536 } else {
537 u32 addr;
538
539 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
540 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
541 pipe, plane);
542 } else {
543 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
544 pipe, plane);
545 }
546 if (work->flip_queued_ring) {
547 seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
548 work->flip_queued_ring->name,
549 work->flip_queued_seqno,
550 dev_priv->next_seqno,
551 work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
552 i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
553 work->flip_queued_seqno));
554 } else
555 seq_printf(m, "Flip not associated with any ring\n");
556 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
557 work->flip_queued_vblank,
558 work->flip_ready_vblank,
559 drm_vblank_count(dev, crtc->pipe));
560 if (work->enable_stall_check)
561 seq_puts(m, "Stall check enabled, ");
562 else
563 seq_puts(m, "Stall check waiting for page flip ioctl, ");
564 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
565
566 if (INTEL_INFO(dev)->gen >= 4)
567 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
568 else
569 addr = I915_READ(DSPADDR(crtc->plane));
570 seq_printf(m, "Current scanout address 0x%08x\n", addr);
571
572 if (work->pending_flip_obj) {
573 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
574 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
575 }
576 }
577 spin_unlock_irq(&dev->event_lock);
578 }
579
580 mutex_unlock(&dev->struct_mutex);
581
582 return 0;
583 }
584
585 static int i915_gem_request_info(struct seq_file *m, void *data)
586 {
587 struct drm_info_node *node = m->private;
588 struct drm_device *dev = node->minor->dev;
589 struct drm_i915_private *dev_priv = dev->dev_private;
590 struct intel_engine_cs *ring;
591 struct drm_i915_gem_request *gem_request;
592 int ret, count, i;
593
594 ret = mutex_lock_interruptible(&dev->struct_mutex);
595 if (ret)
596 return ret;
597
598 count = 0;
599 for_each_ring(ring, dev_priv, i) {
600 if (list_empty(&ring->request_list))
601 continue;
602
603 seq_printf(m, "%s requests:\n", ring->name);
604 list_for_each_entry(gem_request,
605 &ring->request_list,
606 list) {
607 seq_printf(m, " %d @ %d\n",
608 gem_request->seqno,
609 (int) (jiffies - gem_request->emitted_jiffies));
610 }
611 count++;
612 }
613 mutex_unlock(&dev->struct_mutex);
614
615 if (count == 0)
616 seq_puts(m, "No requests\n");
617
618 return 0;
619 }
620
621 static void i915_ring_seqno_info(struct seq_file *m,
622 struct intel_engine_cs *ring)
623 {
624 if (ring->get_seqno) {
625 seq_printf(m, "Current sequence (%s): %u\n",
626 ring->name, ring->get_seqno(ring, false));
627 }
628 }
629
630 static int i915_gem_seqno_info(struct seq_file *m, void *data)
631 {
632 struct drm_info_node *node = m->private;
633 struct drm_device *dev = node->minor->dev;
634 struct drm_i915_private *dev_priv = dev->dev_private;
635 struct intel_engine_cs *ring;
636 int ret, i;
637
638 ret = mutex_lock_interruptible(&dev->struct_mutex);
639 if (ret)
640 return ret;
641 intel_runtime_pm_get(dev_priv);
642
643 for_each_ring(ring, dev_priv, i)
644 i915_ring_seqno_info(m, ring);
645
646 intel_runtime_pm_put(dev_priv);
647 mutex_unlock(&dev->struct_mutex);
648
649 return 0;
650 }
651
652
653 static int i915_interrupt_info(struct seq_file *m, void *data)
654 {
655 struct drm_info_node *node = m->private;
656 struct drm_device *dev = node->minor->dev;
657 struct drm_i915_private *dev_priv = dev->dev_private;
658 struct intel_engine_cs *ring;
659 int ret, i, pipe;
660
661 ret = mutex_lock_interruptible(&dev->struct_mutex);
662 if (ret)
663 return ret;
664 intel_runtime_pm_get(dev_priv);
665
666 if (IS_CHERRYVIEW(dev)) {
667 seq_printf(m, "Master Interrupt Control:\t%08x\n",
668 I915_READ(GEN8_MASTER_IRQ));
669
670 seq_printf(m, "Display IER:\t%08x\n",
671 I915_READ(VLV_IER));
672 seq_printf(m, "Display IIR:\t%08x\n",
673 I915_READ(VLV_IIR));
674 seq_printf(m, "Display IIR_RW:\t%08x\n",
675 I915_READ(VLV_IIR_RW));
676 seq_printf(m, "Display IMR:\t%08x\n",
677 I915_READ(VLV_IMR));
678 for_each_pipe(dev_priv, pipe)
679 seq_printf(m, "Pipe %c stat:\t%08x\n",
680 pipe_name(pipe),
681 I915_READ(PIPESTAT(pipe)));
682
683 seq_printf(m, "Port hotplug:\t%08x\n",
684 I915_READ(PORT_HOTPLUG_EN));
685 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
686 I915_READ(VLV_DPFLIPSTAT));
687 seq_printf(m, "DPINVGTT:\t%08x\n",
688 I915_READ(DPINVGTT));
689
690 for (i = 0; i < 4; i++) {
691 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
692 i, I915_READ(GEN8_GT_IMR(i)));
693 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
694 i, I915_READ(GEN8_GT_IIR(i)));
695 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
696 i, I915_READ(GEN8_GT_IER(i)));
697 }
698
699 seq_printf(m, "PCU interrupt mask:\t%08x\n",
700 I915_READ(GEN8_PCU_IMR));
701 seq_printf(m, "PCU interrupt identity:\t%08x\n",
702 I915_READ(GEN8_PCU_IIR));
703 seq_printf(m, "PCU interrupt enable:\t%08x\n",
704 I915_READ(GEN8_PCU_IER));
705 } else if (INTEL_INFO(dev)->gen >= 8) {
706 seq_printf(m, "Master Interrupt Control:\t%08x\n",
707 I915_READ(GEN8_MASTER_IRQ));
708
709 for (i = 0; i < 4; i++) {
710 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
711 i, I915_READ(GEN8_GT_IMR(i)));
712 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
713 i, I915_READ(GEN8_GT_IIR(i)));
714 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
715 i, I915_READ(GEN8_GT_IER(i)));
716 }
717
718 for_each_pipe(dev_priv, pipe) {
719 if (!intel_display_power_is_enabled(dev_priv,
720 POWER_DOMAIN_PIPE(pipe))) {
721 seq_printf(m, "Pipe %c power disabled\n",
722 pipe_name(pipe));
723 continue;
724 }
725 seq_printf(m, "Pipe %c IMR:\t%08x\n",
726 pipe_name(pipe),
727 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
728 seq_printf(m, "Pipe %c IIR:\t%08x\n",
729 pipe_name(pipe),
730 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
731 seq_printf(m, "Pipe %c IER:\t%08x\n",
732 pipe_name(pipe),
733 I915_READ(GEN8_DE_PIPE_IER(pipe)));
734 }
735
736 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
737 I915_READ(GEN8_DE_PORT_IMR));
738 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
739 I915_READ(GEN8_DE_PORT_IIR));
740 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
741 I915_READ(GEN8_DE_PORT_IER));
742
743 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
744 I915_READ(GEN8_DE_MISC_IMR));
745 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
746 I915_READ(GEN8_DE_MISC_IIR));
747 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
748 I915_READ(GEN8_DE_MISC_IER));
749
750 seq_printf(m, "PCU interrupt mask:\t%08x\n",
751 I915_READ(GEN8_PCU_IMR));
752 seq_printf(m, "PCU interrupt identity:\t%08x\n",
753 I915_READ(GEN8_PCU_IIR));
754 seq_printf(m, "PCU interrupt enable:\t%08x\n",
755 I915_READ(GEN8_PCU_IER));
756 } else if (IS_VALLEYVIEW(dev)) {
757 seq_printf(m, "Display IER:\t%08x\n",
758 I915_READ(VLV_IER));
759 seq_printf(m, "Display IIR:\t%08x\n",
760 I915_READ(VLV_IIR));
761 seq_printf(m, "Display IIR_RW:\t%08x\n",
762 I915_READ(VLV_IIR_RW));
763 seq_printf(m, "Display IMR:\t%08x\n",
764 I915_READ(VLV_IMR));
765 for_each_pipe(dev_priv, pipe)
766 seq_printf(m, "Pipe %c stat:\t%08x\n",
767 pipe_name(pipe),
768 I915_READ(PIPESTAT(pipe)));
769
770 seq_printf(m, "Master IER:\t%08x\n",
771 I915_READ(VLV_MASTER_IER));
772
773 seq_printf(m, "Render IER:\t%08x\n",
774 I915_READ(GTIER));
775 seq_printf(m, "Render IIR:\t%08x\n",
776 I915_READ(GTIIR));
777 seq_printf(m, "Render IMR:\t%08x\n",
778 I915_READ(GTIMR));
779
780 seq_printf(m, "PM IER:\t\t%08x\n",
781 I915_READ(GEN6_PMIER));
782 seq_printf(m, "PM IIR:\t\t%08x\n",
783 I915_READ(GEN6_PMIIR));
784 seq_printf(m, "PM IMR:\t\t%08x\n",
785 I915_READ(GEN6_PMIMR));
786
787 seq_printf(m, "Port hotplug:\t%08x\n",
788 I915_READ(PORT_HOTPLUG_EN));
789 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
790 I915_READ(VLV_DPFLIPSTAT));
791 seq_printf(m, "DPINVGTT:\t%08x\n",
792 I915_READ(DPINVGTT));
793
794 } else if (!HAS_PCH_SPLIT(dev)) {
795 seq_printf(m, "Interrupt enable: %08x\n",
796 I915_READ(IER));
797 seq_printf(m, "Interrupt identity: %08x\n",
798 I915_READ(IIR));
799 seq_printf(m, "Interrupt mask: %08x\n",
800 I915_READ(IMR));
801 for_each_pipe(dev_priv, pipe)
802 seq_printf(m, "Pipe %c stat: %08x\n",
803 pipe_name(pipe),
804 I915_READ(PIPESTAT(pipe)));
805 } else {
806 seq_printf(m, "North Display Interrupt enable: %08x\n",
807 I915_READ(DEIER));
808 seq_printf(m, "North Display Interrupt identity: %08x\n",
809 I915_READ(DEIIR));
810 seq_printf(m, "North Display Interrupt mask: %08x\n",
811 I915_READ(DEIMR));
812 seq_printf(m, "South Display Interrupt enable: %08x\n",
813 I915_READ(SDEIER));
814 seq_printf(m, "South Display Interrupt identity: %08x\n",
815 I915_READ(SDEIIR));
816 seq_printf(m, "South Display Interrupt mask: %08x\n",
817 I915_READ(SDEIMR));
818 seq_printf(m, "Graphics Interrupt enable: %08x\n",
819 I915_READ(GTIER));
820 seq_printf(m, "Graphics Interrupt identity: %08x\n",
821 I915_READ(GTIIR));
822 seq_printf(m, "Graphics Interrupt mask: %08x\n",
823 I915_READ(GTIMR));
824 }
825 for_each_ring(ring, dev_priv, i) {
826 if (INTEL_INFO(dev)->gen >= 6) {
827 seq_printf(m,
828 "Graphics Interrupt mask (%s): %08x\n",
829 ring->name, I915_READ_IMR(ring));
830 }
831 i915_ring_seqno_info(m, ring);
832 }
833 intel_runtime_pm_put(dev_priv);
834 mutex_unlock(&dev->struct_mutex);
835
836 return 0;
837 }
838
839 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
840 {
841 struct drm_info_node *node = m->private;
842 struct drm_device *dev = node->minor->dev;
843 struct drm_i915_private *dev_priv = dev->dev_private;
844 int i, ret;
845
846 ret = mutex_lock_interruptible(&dev->struct_mutex);
847 if (ret)
848 return ret;
849
850 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
851 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
852 for (i = 0; i < dev_priv->num_fence_regs; i++) {
853 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
854
855 seq_printf(m, "Fence %d, pin count = %d, object = ",
856 i, dev_priv->fence_regs[i].pin_count);
857 if (obj == NULL)
858 seq_puts(m, "unused");
859 else
860 describe_obj(m, obj);
861 seq_putc(m, '\n');
862 }
863
864 mutex_unlock(&dev->struct_mutex);
865 return 0;
866 }
867
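/* Dump the first 1 KiB of the selected ring's hardware status page, four dwords per line. */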
868 static int i915_hws_info(struct seq_file *m, void *data)
869 {
870 struct drm_info_node *node = m->private;
871 struct drm_device *dev = node->minor->dev;
872 struct drm_i915_private *dev_priv = dev->dev_private;
873 struct intel_engine_cs *ring;
874 const u32 *hws;
875 int i;
876
877 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
878 hws = ring->status_page.page_addr;
879 if (hws == NULL)
880 return 0;
881
882 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
883 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
884 i * 4,
885 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
886 }
887 return 0;
888 }
889
890 static ssize_t
891 i915_error_state_write(struct file *filp,
892 const char __user *ubuf,
893 size_t cnt,
894 loff_t *ppos)
895 {
896 struct i915_error_state_file_priv *error_priv = filp->private_data;
897 struct drm_device *dev = error_priv->dev;
898 int ret;
899
900 DRM_DEBUG_DRIVER("Resetting error state\n");
901
902 ret = mutex_lock_interruptible(&dev->struct_mutex);
903 if (ret)
904 return ret;
905
906 i915_destroy_error_state(dev);
907 mutex_unlock(&dev->struct_mutex);
908
909 return cnt;
910 }
911
912 static int i915_error_state_open(struct inode *inode, struct file *file)
913 {
914 struct drm_device *dev = inode->i_private;
915 struct i915_error_state_file_priv *error_priv;
916
917 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
918 if (!error_priv)
919 return -ENOMEM;
920
921 error_priv->dev = dev;
922
923 i915_error_state_get(dev, error_priv);
924
925 file->private_data = error_priv;
926
927 return 0;
928 }
929
930 static int i915_error_state_release(struct inode *inode, struct file *file)
931 {
932 struct i915_error_state_file_priv *error_priv = file->private_data;
933
934 i915_error_state_put(error_priv);
935 kfree(error_priv);
936
937 return 0;
938 }
939
940 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
941 size_t count, loff_t *pos)
942 {
943 struct i915_error_state_file_priv *error_priv = file->private_data;
944 struct drm_i915_error_state_buf error_str;
945 loff_t tmp_pos = 0;
946 ssize_t ret_count = 0;
947 int ret;
948
949 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
950 if (ret)
951 return ret;
952
953 ret = i915_error_state_to_str(&error_str, error_priv);
954 if (ret)
955 goto out;
956
957 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
958 error_str.buf,
959 error_str.bytes);
960
961 if (ret_count < 0)
962 ret = ret_count;
963 else
964 *pos = error_str.start + ret_count;
965 out:
966 i915_error_state_buf_release(&error_str);
967 return ret ?: ret_count;
968 }
969
970 static const struct file_operations i915_error_state_fops = {
971 .owner = THIS_MODULE,
972 .open = i915_error_state_open,
973 .read = i915_error_state_read,
974 .write = i915_error_state_write,
975 .llseek = default_llseek,
976 .release = i915_error_state_release,
977 };
978
979 static int
980 i915_next_seqno_get(void *data, u64 *val)
981 {
982 struct drm_device *dev = data;
983 struct drm_i915_private *dev_priv = dev->dev_private;
984 int ret;
985
986 ret = mutex_lock_interruptible(&dev->struct_mutex);
987 if (ret)
988 return ret;
989
990 *val = dev_priv->next_seqno;
991 mutex_unlock(&dev->struct_mutex);
992
993 return 0;
994 }
995
996 static int
997 i915_next_seqno_set(void *data, u64 val)
998 {
999 struct drm_device *dev = data;
1000 int ret;
1001
1002 ret = mutex_lock_interruptible(&dev->struct_mutex);
1003 if (ret)
1004 return ret;
1005
1006 ret = i915_gem_set_seqno(dev, val);
1007 mutex_unlock(&dev->struct_mutex);
1008
1009 return ret;
1010 }
1011
1012 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1013 i915_next_seqno_get, i915_next_seqno_set,
1014 "0x%llx\n");
1015
1016 static int i915_frequency_info(struct seq_file *m, void *unused)
1017 {
1018 struct drm_info_node *node = m->private;
1019 struct drm_device *dev = node->minor->dev;
1020 struct drm_i915_private *dev_priv = dev->dev_private;
1021 int ret = 0;
1022
1023 intel_runtime_pm_get(dev_priv);
1024
1025 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1026
1027 if (IS_GEN5(dev)) {
1028 u16 rgvswctl = I915_READ16(MEMSWCTL);
1029 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1030
1031 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1032 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1033 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1034 MEMSTAT_VID_SHIFT);
1035 seq_printf(m, "Current P-state: %d\n",
1036 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1037 } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
1038 IS_BROADWELL(dev)) {
1039 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1040 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1041 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1042 u32 rpmodectl, rpinclimit, rpdeclimit;
1043 u32 rpstat, cagf, reqf;
1044 u32 rpupei, rpcurup, rpprevup;
1045 u32 rpdownei, rpcurdown, rpprevdown;
1046 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1047 int max_freq;
1048
1049 /* RPSTAT1 is in the GT power well */
1050 ret = mutex_lock_interruptible(&dev->struct_mutex);
1051 if (ret)
1052 goto out;
1053
1054 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
1055
1056 reqf = I915_READ(GEN6_RPNSWREQ);
1057 reqf &= ~GEN6_TURBO_DISABLE;
1058 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1059 reqf >>= 24;
1060 else
1061 reqf >>= 25;
1062 reqf *= GT_FREQUENCY_MULTIPLIER;
1063
1064 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1065 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1066 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1067
1068 rpstat = I915_READ(GEN6_RPSTAT1);
1069 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1070 rpcurup = I915_READ(GEN6_RP_CUR_UP);
1071 rpprevup = I915_READ(GEN6_RP_PREV_UP);
1072 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1073 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1074 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1075 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1076 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1077 else
1078 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1079 cagf *= GT_FREQUENCY_MULTIPLIER;
1080
1081 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
1082 mutex_unlock(&dev->struct_mutex);
1083
1084 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1085 pm_ier = I915_READ(GEN6_PMIER);
1086 pm_imr = I915_READ(GEN6_PMIMR);
1087 pm_isr = I915_READ(GEN6_PMISR);
1088 pm_iir = I915_READ(GEN6_PMIIR);
1089 pm_mask = I915_READ(GEN6_PMINTRMSK);
1090 } else {
1091 pm_ier = I915_READ(GEN8_GT_IER(2));
1092 pm_imr = I915_READ(GEN8_GT_IMR(2));
1093 pm_isr = I915_READ(GEN8_GT_ISR(2));
1094 pm_iir = I915_READ(GEN8_GT_IIR(2));
1095 pm_mask = I915_READ(GEN6_PMINTRMSK);
1096 }
1097 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1098 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1099 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1100 seq_printf(m, "Render p-state ratio: %d\n",
1101 (gt_perf_status & 0xff00) >> 8);
1102 seq_printf(m, "Render p-state VID: %d\n",
1103 gt_perf_status & 0xff);
1104 seq_printf(m, "Render p-state limit: %d\n",
1105 rp_state_limits & 0xff);
1106 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1107 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1108 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1109 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1110 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1111 seq_printf(m, "CAGF: %dMHz\n", cagf);
1112 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1113 GEN6_CURICONT_MASK);
1114 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1115 GEN6_CURBSYTAVG_MASK);
1116 seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1117 GEN6_CURBSYTAVG_MASK);
1118 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1119 GEN6_CURIAVG_MASK);
1120 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1121 GEN6_CURBSYTAVG_MASK);
1122 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1123 GEN6_CURBSYTAVG_MASK);
1124
1125 max_freq = (rp_state_cap & 0xff0000) >> 16;
1126 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1127 max_freq * GT_FREQUENCY_MULTIPLIER);
1128
1129 max_freq = (rp_state_cap & 0xff00) >> 8;
1130 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1131 max_freq * GT_FREQUENCY_MULTIPLIER);
1132
1133 max_freq = rp_state_cap & 0xff;
1134 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1135 max_freq * GT_FREQUENCY_MULTIPLIER);
1136
1137 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1138 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
1139 } else if (IS_VALLEYVIEW(dev)) {
1140 u32 freq_sts;
1141
1142 mutex_lock(&dev_priv->rps.hw_lock);
1143 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1144 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1145 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1146
1147 seq_printf(m, "max GPU freq: %d MHz\n",
1148 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1149
1150 seq_printf(m, "min GPU freq: %d MHz\n",
1151 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1152
1153 seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
1154 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1155
1156 seq_printf(m, "current GPU freq: %d MHz\n",
1157 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1158 mutex_unlock(&dev_priv->rps.hw_lock);
1159 } else {
1160 seq_puts(m, "no P-state info available\n");
1161 }
1162
1163 out:
1164 intel_runtime_pm_put(dev_priv);
1165 return ret;
1166 }
1167
1168 static int ironlake_drpc_info(struct seq_file *m)
1169 {
1170 struct drm_info_node *node = m->private;
1171 struct drm_device *dev = node->minor->dev;
1172 struct drm_i915_private *dev_priv = dev->dev_private;
1173 u32 rgvmodectl, rstdbyctl;
1174 u16 crstandvid;
1175 int ret;
1176
1177 ret = mutex_lock_interruptible(&dev->struct_mutex);
1178 if (ret)
1179 return ret;
1180 intel_runtime_pm_get(dev_priv);
1181
1182 rgvmodectl = I915_READ(MEMMODECTL);
1183 rstdbyctl = I915_READ(RSTDBYCTL);
1184 crstandvid = I915_READ16(CRSTANDVID);
1185
1186 intel_runtime_pm_put(dev_priv);
1187 mutex_unlock(&dev->struct_mutex);
1188
1189 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1190 "yes" : "no");
1191 seq_printf(m, "Boost freq: %d\n",
1192 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1193 MEMMODE_BOOST_FREQ_SHIFT);
1194 seq_printf(m, "HW control enabled: %s\n",
1195 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1196 seq_printf(m, "SW control enabled: %s\n",
1197 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1198 seq_printf(m, "Gated voltage change: %s\n",
1199 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1200 seq_printf(m, "Starting frequency: P%d\n",
1201 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1202 seq_printf(m, "Max P-state: P%d\n",
1203 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1204 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1205 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1206 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1207 seq_printf(m, "Render standby enabled: %s\n",
1208 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1209 seq_puts(m, "Current RS state: ");
1210 switch (rstdbyctl & RSX_STATUS_MASK) {
1211 case RSX_STATUS_ON:
1212 seq_puts(m, "on\n");
1213 break;
1214 case RSX_STATUS_RC1:
1215 seq_puts(m, "RC1\n");
1216 break;
1217 case RSX_STATUS_RC1E:
1218 seq_puts(m, "RC1E\n");
1219 break;
1220 case RSX_STATUS_RS1:
1221 seq_puts(m, "RS1\n");
1222 break;
1223 case RSX_STATUS_RS2:
1224 seq_puts(m, "RS2 (RC6)\n");
1225 break;
1226 case RSX_STATUS_RS3:
1227 seq_puts(m, "RC3 (RC6+)\n");
1228 break;
1229 default:
1230 seq_puts(m, "unknown\n");
1231 break;
1232 }
1233
1234 return 0;
1235 }
1236
1237 static int vlv_drpc_info(struct seq_file *m)
1238 {
1239
1240 struct drm_info_node *node = m->private;
1241 struct drm_device *dev = node->minor->dev;
1242 struct drm_i915_private *dev_priv = dev->dev_private;
1243 u32 rpmodectl1, rcctl1;
1244 unsigned fw_rendercount = 0, fw_mediacount = 0;
1245
1246 intel_runtime_pm_get(dev_priv);
1247
1248 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1249 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1250
1251 intel_runtime_pm_put(dev_priv);
1252
1253 seq_printf(m, "Video Turbo Mode: %s\n",
1254 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1255 seq_printf(m, "Turbo enabled: %s\n",
1256 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1257 seq_printf(m, "HW control enabled: %s\n",
1258 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1259 seq_printf(m, "SW control enabled: %s\n",
1260 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1261 GEN6_RP_MEDIA_SW_MODE));
1262 seq_printf(m, "RC6 Enabled: %s\n",
1263 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1264 GEN6_RC_CTL_EI_MODE(1))));
1265 seq_printf(m, "Render Power Well: %s\n",
1266 (I915_READ(VLV_GTLC_PW_STATUS) &
1267 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1268 seq_printf(m, "Media Power Well: %s\n",
1269 (I915_READ(VLV_GTLC_PW_STATUS) &
1270 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1271
1272 seq_printf(m, "Render RC6 residency since boot: %u\n",
1273 I915_READ(VLV_GT_RENDER_RC6));
1274 seq_printf(m, "Media RC6 residency since boot: %u\n",
1275 I915_READ(VLV_GT_MEDIA_RC6));
1276
1277 spin_lock_irq(&dev_priv->uncore.lock);
1278 fw_rendercount = dev_priv->uncore.fw_rendercount;
1279 fw_mediacount = dev_priv->uncore.fw_mediacount;
1280 spin_unlock_irq(&dev_priv->uncore.lock);
1281
1282 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
1283 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
1284
1285
1286 return 0;
1287 }
1288
1289
1290 static int gen6_drpc_info(struct seq_file *m)
1291 {
1292
1293 struct drm_info_node *node = m->private;
1294 struct drm_device *dev = node->minor->dev;
1295 struct drm_i915_private *dev_priv = dev->dev_private;
1296 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1297 unsigned forcewake_count;
1298 int count = 0, ret;
1299
1300 ret = mutex_lock_interruptible(&dev->struct_mutex);
1301 if (ret)
1302 return ret;
1303 intel_runtime_pm_get(dev_priv);
1304
1305 spin_lock_irq(&dev_priv->uncore.lock);
1306 forcewake_count = dev_priv->uncore.forcewake_count;
1307 spin_unlock_irq(&dev_priv->uncore.lock);
1308
1309 if (forcewake_count) {
1310 seq_puts(m, "RC information inaccurate because somebody "
1311 "holds a forcewake reference \n");
1312 } else {
1313 /* NB: we cannot use forcewake, else we read the wrong values */
1314 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1315 udelay(10);
1316 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1317 }
1318
1319 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1320 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1321
1322 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1323 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1324 mutex_unlock(&dev->struct_mutex);
1325 mutex_lock(&dev_priv->rps.hw_lock);
1326 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1327 mutex_unlock(&dev_priv->rps.hw_lock);
1328
1329 intel_runtime_pm_put(dev_priv);
1330
1331 seq_printf(m, "Video Turbo Mode: %s\n",
1332 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1333 seq_printf(m, "HW control enabled: %s\n",
1334 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1335 seq_printf(m, "SW control enabled: %s\n",
1336 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1337 GEN6_RP_MEDIA_SW_MODE));
1338 seq_printf(m, "RC1e Enabled: %s\n",
1339 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1340 seq_printf(m, "RC6 Enabled: %s\n",
1341 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1342 seq_printf(m, "Deep RC6 Enabled: %s\n",
1343 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1344 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1345 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1346 seq_puts(m, "Current RC state: ");
1347 switch (gt_core_status & GEN6_RCn_MASK) {
1348 case GEN6_RC0:
1349 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1350 seq_puts(m, "Core Power Down\n");
1351 else
1352 seq_puts(m, "on\n");
1353 break;
1354 case GEN6_RC3:
1355 seq_puts(m, "RC3\n");
1356 break;
1357 case GEN6_RC6:
1358 seq_puts(m, "RC6\n");
1359 break;
1360 case GEN6_RC7:
1361 seq_puts(m, "RC7\n");
1362 break;
1363 default:
1364 seq_puts(m, "Unknown\n");
1365 break;
1366 }
1367
1368 seq_printf(m, "Core Power Down: %s\n",
1369 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1370
1371 /* Not exactly sure what this is */
1372 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1373 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1374 seq_printf(m, "RC6 residency since boot: %u\n",
1375 I915_READ(GEN6_GT_GFX_RC6));
1376 seq_printf(m, "RC6+ residency since boot: %u\n",
1377 I915_READ(GEN6_GT_GFX_RC6p));
1378 seq_printf(m, "RC6++ residency since boot: %u\n",
1379 I915_READ(GEN6_GT_GFX_RC6pp));
1380
1381 seq_printf(m, "RC6 voltage: %dmV\n",
1382 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1383 seq_printf(m, "RC6+ voltage: %dmV\n",
1384 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1385 seq_printf(m, "RC6++ voltage: %dmV\n",
1386 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1387 return 0;
1388 }
1389
1390 static int i915_drpc_info(struct seq_file *m, void *unused)
1391 {
1392 struct drm_info_node *node = m->private;
1393 struct drm_device *dev = node->minor->dev;
1394
1395 if (IS_VALLEYVIEW(dev))
1396 return vlv_drpc_info(m);
1397 else if (INTEL_INFO(dev)->gen >= 6)
1398 return gen6_drpc_info(m);
1399 else
1400 return ironlake_drpc_info(m);
1401 }
1402
1403 static int i915_fbc_status(struct seq_file *m, void *unused)
1404 {
1405 struct drm_info_node *node = m->private;
1406 struct drm_device *dev = node->minor->dev;
1407 struct drm_i915_private *dev_priv = dev->dev_private;
1408
1409 if (!HAS_FBC(dev)) {
1410 seq_puts(m, "FBC unsupported on this chipset\n");
1411 return 0;
1412 }
1413
1414 intel_runtime_pm_get(dev_priv);
1415
1416 if (intel_fbc_enabled(dev)) {
1417 seq_puts(m, "FBC enabled\n");
1418 } else {
1419 seq_puts(m, "FBC disabled: ");
1420 switch (dev_priv->fbc.no_fbc_reason) {
1421 case FBC_OK:
1422 seq_puts(m, "FBC actived, but currently disabled in hardware");
1423 break;
1424 case FBC_UNSUPPORTED:
1425 seq_puts(m, "unsupported by this chipset");
1426 break;
1427 case FBC_NO_OUTPUT:
1428 seq_puts(m, "no outputs");
1429 break;
1430 case FBC_STOLEN_TOO_SMALL:
1431 seq_puts(m, "not enough stolen memory");
1432 break;
1433 case FBC_UNSUPPORTED_MODE:
1434 seq_puts(m, "mode not supported");
1435 break;
1436 case FBC_MODE_TOO_LARGE:
1437 seq_puts(m, "mode too large");
1438 break;
1439 case FBC_BAD_PLANE:
1440 seq_puts(m, "FBC unsupported on plane");
1441 break;
1442 case FBC_NOT_TILED:
1443 seq_puts(m, "scanout buffer not tiled");
1444 break;
1445 case FBC_MULTIPLE_PIPES:
1446 seq_puts(m, "multiple pipes are enabled");
1447 break;
1448 case FBC_MODULE_PARAM:
1449 seq_puts(m, "disabled per module param (default off)");
1450 break;
1451 case FBC_CHIP_DEFAULT:
1452 seq_puts(m, "disabled per chip default");
1453 break;
1454 default:
1455 seq_puts(m, "unknown reason");
1456 }
1457 seq_putc(m, '\n');
1458 }
1459
1460 intel_runtime_pm_put(dev_priv);
1461
1462 return 0;
1463 }
1464
1465 static int i915_fbc_fc_get(void *data, u64 *val)
1466 {
1467 struct drm_device *dev = data;
1468 struct drm_i915_private *dev_priv = dev->dev_private;
1469
1470 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1471 return -ENODEV;
1472
1473 drm_modeset_lock_all(dev);
1474 *val = dev_priv->fbc.false_color;
1475 drm_modeset_unlock_all(dev);
1476
1477 return 0;
1478 }
1479
1480 static int i915_fbc_fc_set(void *data, u64 val)
1481 {
1482 struct drm_device *dev = data;
1483 struct drm_i915_private *dev_priv = dev->dev_private;
1484 u32 reg;
1485
1486 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1487 return -ENODEV;
1488
1489 drm_modeset_lock_all(dev);
1490
1491 reg = I915_READ(ILK_DPFC_CONTROL);
1492 dev_priv->fbc.false_color = val;
1493
1494 I915_WRITE(ILK_DPFC_CONTROL, val ?
1495 (reg | FBC_CTL_FALSE_COLOR) :
1496 (reg & ~FBC_CTL_FALSE_COLOR));
1497
1498 drm_modeset_unlock_all(dev);
1499 return 0;
1500 }
1501
1502 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1503 i915_fbc_fc_get, i915_fbc_fc_set,
1504 "%llu\n");
1505
1506 static int i915_ips_status(struct seq_file *m, void *unused)
1507 {
1508 struct drm_info_node *node = m->private;
1509 struct drm_device *dev = node->minor->dev;
1510 struct drm_i915_private *dev_priv = dev->dev_private;
1511
1512 if (!HAS_IPS(dev)) {
1513 seq_puts(m, "not supported\n");
1514 return 0;
1515 }
1516
1517 intel_runtime_pm_get(dev_priv);
1518
1519 seq_printf(m, "Enabled by kernel parameter: %s\n",
1520 yesno(i915.enable_ips));
1521
1522 if (INTEL_INFO(dev)->gen >= 8) {
1523 seq_puts(m, "Currently: unknown\n");
1524 } else {
1525 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1526 seq_puts(m, "Currently: enabled\n");
1527 else
1528 seq_puts(m, "Currently: disabled\n");
1529 }
1530
1531 intel_runtime_pm_put(dev_priv);
1532
1533 return 0;
1534 }
1535
1536 static int i915_sr_status(struct seq_file *m, void *unused)
1537 {
1538 struct drm_info_node *node = m->private;
1539 struct drm_device *dev = node->minor->dev;
1540 struct drm_i915_private *dev_priv = dev->dev_private;
1541 bool sr_enabled = false;
1542
1543 intel_runtime_pm_get(dev_priv);
1544
1545 if (HAS_PCH_SPLIT(dev))
1546 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1547 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1548 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1549 else if (IS_I915GM(dev))
1550 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1551 else if (IS_PINEVIEW(dev))
1552 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1553
1554 intel_runtime_pm_put(dev_priv);
1555
1556 seq_printf(m, "self-refresh: %s\n",
1557 sr_enabled ? "enabled" : "disabled");
1558
1559 return 0;
1560 }
1561
1562 static int i915_emon_status(struct seq_file *m, void *unused)
1563 {
1564 struct drm_info_node *node = m->private;
1565 struct drm_device *dev = node->minor->dev;
1566 struct drm_i915_private *dev_priv = dev->dev_private;
1567 unsigned long temp, chipset, gfx;
1568 int ret;
1569
1570 if (!IS_GEN5(dev))
1571 return -ENODEV;
1572
1573 ret = mutex_lock_interruptible(&dev->struct_mutex);
1574 if (ret)
1575 return ret;
1576
1577 temp = i915_mch_val(dev_priv);
1578 chipset = i915_chipset_val(dev_priv);
1579 gfx = i915_gfx_val(dev_priv);
1580 mutex_unlock(&dev->struct_mutex);
1581
1582 seq_printf(m, "GMCH temp: %ld\n", temp);
1583 seq_printf(m, "Chipset power: %ld\n", chipset);
1584 seq_printf(m, "GFX power: %ld\n", gfx);
1585 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1586
1587 return 0;
1588 }
1589
1590 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1591 {
1592 struct drm_info_node *node = m->private;
1593 struct drm_device *dev = node->minor->dev;
1594 struct drm_i915_private *dev_priv = dev->dev_private;
1595 int ret = 0;
1596 int gpu_freq, ia_freq;
1597
1598 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1599 seq_puts(m, "unsupported on this chipset\n");
1600 return 0;
1601 }
1602
1603 intel_runtime_pm_get(dev_priv);
1604
1605 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1606
1607 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1608 if (ret)
1609 goto out;
1610
1611 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1612
1613 for (gpu_freq = dev_priv->rps.min_freq_softlimit;
1614 gpu_freq <= dev_priv->rps.max_freq_softlimit;
1615 gpu_freq++) {
1616 ia_freq = gpu_freq;
1617 sandybridge_pcode_read(dev_priv,
1618 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1619 &ia_freq);
1620 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1621 gpu_freq * GT_FREQUENCY_MULTIPLIER,
1622 ((ia_freq >> 0) & 0xff) * 100,
1623 ((ia_freq >> 8) & 0xff) * 100);
1624 }
1625
1626 mutex_unlock(&dev_priv->rps.hw_lock);
1627
1628 out:
1629 intel_runtime_pm_put(dev_priv);
1630 return ret;
1631 }
1632
1633 static int i915_opregion(struct seq_file *m, void *unused)
1634 {
1635 struct drm_info_node *node = m->private;
1636 struct drm_device *dev = node->minor->dev;
1637 struct drm_i915_private *dev_priv = dev->dev_private;
1638 struct intel_opregion *opregion = &dev_priv->opregion;
1639 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1640 int ret;
1641
1642 if (data == NULL)
1643 return -ENOMEM;
1644
1645 ret = mutex_lock_interruptible(&dev->struct_mutex);
1646 if (ret)
1647 goto out;
1648
1649 if (opregion->header) {
1650 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1651 seq_write(m, data, OPREGION_SIZE);
1652 }
1653
1654 mutex_unlock(&dev->struct_mutex);
1655
1656 out:
1657 kfree(data);
1658 return ret;
1659 }
1660
1661 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1662 {
1663 struct drm_info_node *node = m->private;
1664 struct drm_device *dev = node->minor->dev;
1665 struct intel_fbdev *ifbdev = NULL;
1666 struct intel_framebuffer *fb;
1667
1668 #ifdef CONFIG_DRM_I915_FBDEV
1669 struct drm_i915_private *dev_priv = dev->dev_private;
1670
1671 ifbdev = dev_priv->fbdev;
1672 fb = to_intel_framebuffer(ifbdev->helper.fb);
1673
1674 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1675 fb->base.width,
1676 fb->base.height,
1677 fb->base.depth,
1678 fb->base.bits_per_pixel,
1679 atomic_read(&fb->base.refcount.refcount));
1680 describe_obj(m, fb->obj);
1681 seq_putc(m, '\n');
1682 #endif
1683
1684 mutex_lock(&dev->mode_config.fb_lock);
1685 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1686 if (ifbdev && &fb->base == ifbdev->helper.fb)
1687 continue;
1688
1689 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1690 fb->base.width,
1691 fb->base.height,
1692 fb->base.depth,
1693 fb->base.bits_per_pixel,
1694 atomic_read(&fb->base.refcount.refcount));
1695 describe_obj(m, fb->obj);
1696 seq_putc(m, '\n');
1697 }
1698 mutex_unlock(&dev->mode_config.fb_lock);
1699
1700 return 0;
1701 }
1702
1703 static void describe_ctx_ringbuf(struct seq_file *m,
1704 struct intel_ringbuffer *ringbuf)
1705 {
1706 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1707 ringbuf->space, ringbuf->head, ringbuf->tail,
1708 ringbuf->last_retired_head);
1709 }
1710
1711 static int i915_context_status(struct seq_file *m, void *unused)
1712 {
1713 struct drm_info_node *node = m->private;
1714 struct drm_device *dev = node->minor->dev;
1715 struct drm_i915_private *dev_priv = dev->dev_private;
1716 struct intel_engine_cs *ring;
1717 struct intel_context *ctx;
1718 int ret, i;
1719
1720 ret = mutex_lock_interruptible(&dev->struct_mutex);
1721 if (ret)
1722 return ret;
1723
1724 if (dev_priv->ips.pwrctx) {
1725 seq_puts(m, "power context ");
1726 describe_obj(m, dev_priv->ips.pwrctx);
1727 seq_putc(m, '\n');
1728 }
1729
1730 if (dev_priv->ips.renderctx) {
1731 seq_puts(m, "render context ");
1732 describe_obj(m, dev_priv->ips.renderctx);
1733 seq_putc(m, '\n');
1734 }
1735
1736 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1737 if (!i915.enable_execlists &&
1738 ctx->legacy_hw_ctx.rcs_state == NULL)
1739 continue;
1740
1741 seq_puts(m, "HW context ");
1742 describe_ctx(m, ctx);
1743 for_each_ring(ring, dev_priv, i) {
1744 if (ring->default_context == ctx)
1745 seq_printf(m, "(default context %s) ",
1746 ring->name);
1747 }
1748
1749 if (i915.enable_execlists) {
1750 seq_putc(m, '\n');
1751 for_each_ring(ring, dev_priv, i) {
1752 struct drm_i915_gem_object *ctx_obj =
1753 ctx->engine[i].state;
1754 struct intel_ringbuffer *ringbuf =
1755 ctx->engine[i].ringbuf;
1756
1757 seq_printf(m, "%s: ", ring->name);
1758 if (ctx_obj)
1759 describe_obj(m, ctx_obj);
1760 if (ringbuf)
1761 describe_ctx_ringbuf(m, ringbuf);
1762 seq_putc(m, '\n');
1763 }
1764 } else {
1765 describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1766 }
1767
1768 seq_putc(m, '\n');
1769 }
1770
1771 mutex_unlock(&dev->struct_mutex);
1772
1773 return 0;
1774 }
1775
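/*
 * With execlists enabled, dump the saved register state (the logical
 * ring context image) of each context, skipping each ring's default
 * context.
 */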
1776 static int i915_dump_lrc(struct seq_file *m, void *unused)
1777 {
1778 struct drm_info_node *node = (struct drm_info_node *) m->private;
1779 struct drm_device *dev = node->minor->dev;
1780 struct drm_i915_private *dev_priv = dev->dev_private;
1781 struct intel_engine_cs *ring;
1782 struct intel_context *ctx;
1783 int ret, i;
1784
1785 if (!i915.enable_execlists) {
1786 seq_printf(m, "Logical Ring Contexts are disabled\n");
1787 return 0;
1788 }
1789
1790 ret = mutex_lock_interruptible(&dev->struct_mutex);
1791 if (ret)
1792 return ret;
1793
1794 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1795 for_each_ring(ring, dev_priv, i) {
1796 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1797
1798 if (ring->default_context == ctx)
1799 continue;
1800
1801 if (ctx_obj) {
1802 struct page *page = i915_gem_object_get_page(ctx_obj, 1);
1803 uint32_t *reg_state = kmap_atomic(page);
1804 int j;
1805
1806 seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1807 intel_execlists_ctx_id(ctx_obj));
1808
1809 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1810 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1811 i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
1812 reg_state[j], reg_state[j + 1],
1813 reg_state[j + 2], reg_state[j + 3]);
1814 }
1815 kunmap_atomic(reg_state);
1816
1817 seq_putc(m, '\n');
1818 }
1819 }
1820 }
1821
1822 mutex_unlock(&dev->struct_mutex);
1823
1824 return 0;
1825 }
1826
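/*
 * Dump execlist submission state per ring: ELSP status and context ID,
 * the context-status-buffer read/write pointers and entries, and the
 * software execlist queue depth plus its head request.
 */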
1827 static int i915_execlists(struct seq_file *m, void *data)
1828 {
1829 struct drm_info_node *node = (struct drm_info_node *)m->private;
1830 struct drm_device *dev = node->minor->dev;
1831 struct drm_i915_private *dev_priv = dev->dev_private;
1832 struct intel_engine_cs *ring;
1833 u32 status_pointer;
1834 u8 read_pointer;
1835 u8 write_pointer;
1836 u32 status;
1837 u32 ctx_id;
1838 struct list_head *cursor;
1839 int ring_id, i;
1840 int ret;
1841
1842 if (!i915.enable_execlists) {
1843 seq_puts(m, "Logical Ring Contexts are disabled\n");
1844 return 0;
1845 }
1846
1847 ret = mutex_lock_interruptible(&dev->struct_mutex);
1848 if (ret)
1849 return ret;
1850
1851 intel_runtime_pm_get(dev_priv);
1852
1853 for_each_ring(ring, dev_priv, ring_id) {
1854 struct intel_ctx_submit_request *head_req = NULL;
1855 int count = 0;
1856 unsigned long flags;
1857
1858 seq_printf(m, "%s\n", ring->name);
1859
1860 status = I915_READ(RING_EXECLIST_STATUS(ring));
1861 ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
1862 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
1863 status, ctx_id);
1864
1865 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
1866 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
1867
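/*
 * A hedged note for readers: the context status buffer appears to hold
 * six entries (see the loop over the status buffer below), so when the
 * software read pointer is ahead of the hardware write pointer the
 * write pointer is unwrapped by adding the buffer size before printing.
 */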
1868 read_pointer = ring->next_context_status_buffer;
1869 write_pointer = status_pointer & 0x07;
1870 if (read_pointer > write_pointer)
1871 write_pointer += 6;
1872 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
1873 read_pointer, write_pointer);
1874
1875 for (i = 0; i < 6; i++) {
1876 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
1877 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
1878
1879 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
1880 i, status, ctx_id);
1881 }
1882
1883 spin_lock_irqsave(&ring->execlist_lock, flags);
1884 list_for_each(cursor, &ring->execlist_queue)
1885 count++;
1886 head_req = list_first_entry_or_null(&ring->execlist_queue,
1887 struct intel_ctx_submit_request, execlist_link);
1888 spin_unlock_irqrestore(&ring->execlist_lock, flags);
1889
1890 seq_printf(m, "\t%d requests in queue\n", count);
1891 if (head_req) {
1892 struct drm_i915_gem_object *ctx_obj;
1893
1894 ctx_obj = head_req->ctx->engine[ring_id].state;
1895 seq_printf(m, "\tHead request id: %u\n",
1896 intel_execlists_ctx_id(ctx_obj));
1897 seq_printf(m, "\tHead request tail: %u\n",
1898 head_req->tail);
1899 }
1900
1901 seq_putc(m, '\n');
1902 }
1903
1904 intel_runtime_pm_put(dev_priv);
1905 mutex_unlock(&dev->struct_mutex);
1906
1907 return 0;
1908 }
1909
1910 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1911 {
1912 struct drm_info_node *node = m->private;
1913 struct drm_device *dev = node->minor->dev;
1914 struct drm_i915_private *dev_priv = dev->dev_private;
1915 unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
1916
1917 spin_lock_irq(&dev_priv->uncore.lock);
1918 if (IS_VALLEYVIEW(dev)) {
1919 fw_rendercount = dev_priv->uncore.fw_rendercount;
1920 fw_mediacount = dev_priv->uncore.fw_mediacount;
1921 } else
1922 forcewake_count = dev_priv->uncore.forcewake_count;
1923 spin_unlock_irq(&dev_priv->uncore.lock);
1924
1925 if (IS_VALLEYVIEW(dev)) {
1926 seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
1927 seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
1928 } else
1929 seq_printf(m, "forcewake count = %u\n", forcewake_count);
1930
1931 return 0;
1932 }
1933
1934 static const char *swizzle_string(unsigned swizzle)
1935 {
1936 switch (swizzle) {
1937 case I915_BIT_6_SWIZZLE_NONE:
1938 return "none";
1939 case I915_BIT_6_SWIZZLE_9:
1940 return "bit9";
1941 case I915_BIT_6_SWIZZLE_9_10:
1942 return "bit9/bit10";
1943 case I915_BIT_6_SWIZZLE_9_11:
1944 return "bit9/bit11";
1945 case I915_BIT_6_SWIZZLE_9_10_11:
1946 return "bit9/bit10/bit11";
1947 case I915_BIT_6_SWIZZLE_9_17:
1948 return "bit9/bit17";
1949 case I915_BIT_6_SWIZZLE_9_10_17:
1950 return "bit9/bit10/bit17";
1951 case I915_BIT_6_SWIZZLE_UNKNOWN:
1952 return "unknown";
1953 }
1954
1955 return "bug";
1956 }
1957
1958 static int i915_swizzle_info(struct seq_file *m, void *data)
1959 {
1960 struct drm_info_node *node = m->private;
1961 struct drm_device *dev = node->minor->dev;
1962 struct drm_i915_private *dev_priv = dev->dev_private;
1963 int ret;
1964
1965 ret = mutex_lock_interruptible(&dev->struct_mutex);
1966 if (ret)
1967 return ret;
1968 intel_runtime_pm_get(dev_priv);
1969
1970 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1971 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1972 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1973 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1974
1975 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1976 seq_printf(m, "DDC = 0x%08x\n",
1977 I915_READ(DCC));
1978 seq_printf(m, "C0DRB3 = 0x%04x\n",
1979 I915_READ16(C0DRB3));
1980 seq_printf(m, "C1DRB3 = 0x%04x\n",
1981 I915_READ16(C1DRB3));
1982 } else if (INTEL_INFO(dev)->gen >= 6) {
1983 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1984 I915_READ(MAD_DIMM_C0));
1985 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1986 I915_READ(MAD_DIMM_C1));
1987 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1988 I915_READ(MAD_DIMM_C2));
1989 seq_printf(m, "TILECTL = 0x%08x\n",
1990 I915_READ(TILECTL));
1991 if (INTEL_INFO(dev)->gen >= 8)
1992 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1993 I915_READ(GAMTARBMODE));
1994 else
1995 seq_printf(m, "ARB_MODE = 0x%08x\n",
1996 I915_READ(ARB_MODE));
1997 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1998 I915_READ(DISP_ARB_CTL));
1999 }
2000 intel_runtime_pm_put(dev_priv);
2001 mutex_unlock(&dev->struct_mutex);
2002
2003 return 0;
2004 }
2005
2006 static int per_file_ctx(int id, void *ptr, void *data)
2007 {
2008 struct intel_context *ctx = ptr;
2009 struct seq_file *m = data;
2010 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2011
2012 if (!ppgtt) {
2013 seq_printf(m, " no ppgtt for context %d\n",
2014 ctx->user_handle);
2015 return 0;
2016 }
2017
2018 if (i915_gem_context_is_default(ctx))
2019 seq_puts(m, " default context:\n");
2020 else
2021 seq_printf(m, " context %d:\n", ctx->user_handle);
2022 ppgtt->debug_dump(ppgtt, m);
2023
2024 return 0;
2025 }
2026
2027 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2028 {
2029 struct drm_i915_private *dev_priv = dev->dev_private;
2030 struct intel_engine_cs *ring;
2031 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2032 int unused, i;
2033
2034 if (!ppgtt)
2035 return;
2036
2037 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
2038 seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
2039 for_each_ring(ring, dev_priv, unused) {
2040 seq_printf(m, "%s\n", ring->name);
2041 for (i = 0; i < 4; i++) {
2042 u32 offset = 0x270 + i * 8;
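/*
 * Each page-directory pointer is split across a pair of 32-bit
 * registers at mmio_base + 0x270 + n*8 (low dword) and +4 (high
 * dword); the high half is read first and shifted up, then the low
 * half is OR-ed in to reassemble the 64-bit value below.
 */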
2043 u64 pdp = I915_READ(ring->mmio_base + offset + 4);
2044 pdp <<= 32;
2045 pdp |= I915_READ(ring->mmio_base + offset);
2046 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2047 }
2048 }
2049 }
2050
2051 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2052 {
2053 struct drm_i915_private *dev_priv = dev->dev_private;
2054 struct intel_engine_cs *ring;
2055 struct drm_file *file;
2056 int i;
2057
2058 if (INTEL_INFO(dev)->gen == 6)
2059 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2060
2061 for_each_ring(ring, dev_priv, i) {
2062 seq_printf(m, "%s\n", ring->name);
2063 if (INTEL_INFO(dev)->gen == 7)
2064 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2065 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2066 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2067 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2068 }
2069 if (dev_priv->mm.aliasing_ppgtt) {
2070 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2071
2072 seq_puts(m, "aliasing PPGTT:\n");
2073 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
2074
2075 ppgtt->debug_dump(ppgtt, m);
2076 }
2077
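/*
 * Hedged caveat: get_pid_task() can return NULL if the owning task has
 * already exited, and it takes a task reference that is not dropped
 * here; a more defensive version would check the result and call
 * put_task_struct() after printing comm.
 */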
2078 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2079 struct drm_i915_file_private *file_priv = file->driver_priv;
2080
2081 seq_printf(m, "proc: %s\n",
2082 get_pid_task(file->pid, PIDTYPE_PID)->comm);
2083 idr_for_each(&file_priv->context_idr, per_file_ctx, m);
2084 }
2085 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2086 }
2087
2088 static int i915_ppgtt_info(struct seq_file *m, void *data)
2089 {
2090 struct drm_info_node *node = m->private;
2091 struct drm_device *dev = node->minor->dev;
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2093
2094 int ret = mutex_lock_interruptible(&dev->struct_mutex);
2095 if (ret)
2096 return ret;
2097 intel_runtime_pm_get(dev_priv);
2098
2099 if (INTEL_INFO(dev)->gen >= 8)
2100 gen8_ppgtt_info(m, dev);
2101 else if (INTEL_INFO(dev)->gen >= 6)
2102 gen6_ppgtt_info(m, dev);
2103
2104 intel_runtime_pm_put(dev_priv);
2105 mutex_unlock(&dev->struct_mutex);
2106
2107 return 0;
2108 }
2109
2110 static int i915_llc(struct seq_file *m, void *data)
2111 {
2112 struct drm_info_node *node = m->private;
2113 struct drm_device *dev = node->minor->dev;
2114 struct drm_i915_private *dev_priv = dev->dev_private;
2115
2116 /* Size calculation for LLC is a bit of a pain. Ignore for now. */
2117 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2118 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
2119
2120 return 0;
2121 }
2122
2123 static int i915_edp_psr_status(struct seq_file *m, void *data)
2124 {
2125 struct drm_info_node *node = m->private;
2126 struct drm_device *dev = node->minor->dev;
2127 struct drm_i915_private *dev_priv = dev->dev_private;
2128 u32 psrperf = 0;
2129 bool enabled = false;
2130
2131 intel_runtime_pm_get(dev_priv);
2132
2133 mutex_lock(&dev_priv->psr.lock);
2134 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2135 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2136 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2137 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2138 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2139 dev_priv->psr.busy_frontbuffer_bits);
2140 seq_printf(m, "Re-enable work scheduled: %s\n",
2141 yesno(work_busy(&dev_priv->psr.work.work)));
2142
2143 enabled = HAS_PSR(dev) &&
2144 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
2145 seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2146
2147 if (HAS_PSR(dev))
2148 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
2149 EDP_PSR_PERF_CNT_MASK;
2150 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2151 mutex_unlock(&dev_priv->psr.lock);
2152
2153 intel_runtime_pm_put(dev_priv);
2154 return 0;
2155 }
2156
2157 static int i915_sink_crc(struct seq_file *m, void *data)
2158 {
2159 struct drm_info_node *node = m->private;
2160 struct drm_device *dev = node->minor->dev;
2161 struct intel_encoder *encoder;
2162 struct intel_connector *connector;
2163 struct intel_dp *intel_dp = NULL;
2164 int ret;
2165 u8 crc[6];
2166
2167 drm_modeset_lock_all(dev);
2168 list_for_each_entry(connector, &dev->mode_config.connector_list,
2169 base.head) {
2170
2171 if (connector->base.dpms != DRM_MODE_DPMS_ON)
2172 continue;
2173
2174 if (!connector->base.encoder)
2175 continue;
2176
2177 encoder = to_intel_encoder(connector->base.encoder);
2178 if (encoder->type != INTEL_OUTPUT_EDP)
2179 continue;
2180
2181 intel_dp = enc_to_intel_dp(&encoder->base);
2182
2183 ret = intel_dp_sink_crc(intel_dp, crc);
2184 if (ret)
2185 goto out;
2186
2187 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2188 crc[0], crc[1], crc[2],
2189 crc[3], crc[4], crc[5]);
2190 goto out;
2191 }
2192 ret = -ENODEV;
2193 out:
2194 drm_modeset_unlock_all(dev);
2195 return ret;
2196 }
2197
2198 static int i915_energy_uJ(struct seq_file *m, void *data)
2199 {
2200 struct drm_info_node *node = m->private;
2201 struct drm_device *dev = node->minor->dev;
2202 struct drm_i915_private *dev_priv = dev->dev_private;
2203 u64 power;
2204 u32 units;
2205
2206 if (INTEL_INFO(dev)->gen < 6)
2207 return -ENODEV;
2208
2209 intel_runtime_pm_get(dev_priv);
2210
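/*
 * Rough sketch of the unit math below, based on the SDM description of
 * MSR_RAPL_POWER_UNIT: bits 12:8 hold the energy status unit ESU, with
 * one counter tick equal to 1/2^ESU joules, so the microjoules-per-tick
 * factor is 1000000 >> ESU before scaling the MCH energy counter.
 */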
2211 rdmsrl(MSR_RAPL_POWER_UNIT, power);
2212 power = (power & 0x1f00) >> 8;
2213 units = 1000000 / (1 << power); /* convert to uJ */
2214 power = I915_READ(MCH_SECP_NRG_STTS);
2215 power *= units;
2216
2217 intel_runtime_pm_put(dev_priv);
2218
2219 seq_printf(m, "%llu", (long long unsigned)power);
2220
2221 return 0;
2222 }
2223
2224 static int i915_pc8_status(struct seq_file *m, void *unused)
2225 {
2226 struct drm_info_node *node = m->private;
2227 struct drm_device *dev = node->minor->dev;
2228 struct drm_i915_private *dev_priv = dev->dev_private;
2229
2230 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2231 seq_puts(m, "not supported\n");
2232 return 0;
2233 }
2234
2235 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2236 seq_printf(m, "IRQs disabled: %s\n",
2237 yesno(!intel_irqs_enabled(dev_priv)));
2238
2239 return 0;
2240 }
2241
2242 static const char *power_domain_str(enum intel_display_power_domain domain)
2243 {
2244 switch (domain) {
2245 case POWER_DOMAIN_PIPE_A:
2246 return "PIPE_A";
2247 case POWER_DOMAIN_PIPE_B:
2248 return "PIPE_B";
2249 case POWER_DOMAIN_PIPE_C:
2250 return "PIPE_C";
2251 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2252 return "PIPE_A_PANEL_FITTER";
2253 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2254 return "PIPE_B_PANEL_FITTER";
2255 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2256 return "PIPE_C_PANEL_FITTER";
2257 case POWER_DOMAIN_TRANSCODER_A:
2258 return "TRANSCODER_A";
2259 case POWER_DOMAIN_TRANSCODER_B:
2260 return "TRANSCODER_B";
2261 case POWER_DOMAIN_TRANSCODER_C:
2262 return "TRANSCODER_C";
2263 case POWER_DOMAIN_TRANSCODER_EDP:
2264 return "TRANSCODER_EDP";
2265 case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2266 return "PORT_DDI_A_2_LANES";
2267 case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2268 return "PORT_DDI_A_4_LANES";
2269 case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2270 return "PORT_DDI_B_2_LANES";
2271 case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2272 return "PORT_DDI_B_4_LANES";
2273 case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2274 return "PORT_DDI_C_2_LANES";
2275 case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2276 return "PORT_DDI_C_4_LANES";
2277 case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2278 return "PORT_DDI_D_2_LANES";
2279 case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2280 return "PORT_DDI_D_4_LANES";
2281 case POWER_DOMAIN_PORT_DSI:
2282 return "PORT_DSI";
2283 case POWER_DOMAIN_PORT_CRT:
2284 return "PORT_CRT";
2285 case POWER_DOMAIN_PORT_OTHER:
2286 return "PORT_OTHER";
2287 case POWER_DOMAIN_VGA:
2288 return "VGA";
2289 case POWER_DOMAIN_AUDIO:
2290 return "AUDIO";
2291 case POWER_DOMAIN_PLLS:
2292 return "PLLS";
2293 case POWER_DOMAIN_INIT:
2294 return "INIT";
2295 default:
2296 WARN_ON(1);
2297 return "?";
2298 }
2299 }
2300
2301 static int i915_power_domain_info(struct seq_file *m, void *unused)
2302 {
2303 struct drm_info_node *node = m->private;
2304 struct drm_device *dev = node->minor->dev;
2305 struct drm_i915_private *dev_priv = dev->dev_private;
2306 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2307 int i;
2308
2309 mutex_lock(&power_domains->lock);
2310
2311 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2312 for (i = 0; i < power_domains->power_well_count; i++) {
2313 struct i915_power_well *power_well;
2314 enum intel_display_power_domain power_domain;
2315
2316 power_well = &power_domains->power_wells[i];
2317 seq_printf(m, "%-25s %d\n", power_well->name,
2318 power_well->count);
2319
2320 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2321 power_domain++) {
2322 if (!(BIT(power_domain) & power_well->domains))
2323 continue;
2324
2325 seq_printf(m, " %-23s %d\n",
2326 power_domain_str(power_domain),
2327 power_domains->domain_use_count[power_domain]);
2328 }
2329 }
2330
2331 mutex_unlock(&power_domains->lock);
2332
2333 return 0;
2334 }
2335
2336 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2337 struct drm_display_mode *mode)
2338 {
2339 int i;
2340
2341 for (i = 0; i < tabs; i++)
2342 seq_putc(m, '\t');
2343
2344 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2345 mode->base.id, mode->name,
2346 mode->vrefresh, mode->clock,
2347 mode->hdisplay, mode->hsync_start,
2348 mode->hsync_end, mode->htotal,
2349 mode->vdisplay, mode->vsync_start,
2350 mode->vsync_end, mode->vtotal,
2351 mode->type, mode->flags);
2352 }
2353
2354 static void intel_encoder_info(struct seq_file *m,
2355 struct intel_crtc *intel_crtc,
2356 struct intel_encoder *intel_encoder)
2357 {
2358 struct drm_info_node *node = m->private;
2359 struct drm_device *dev = node->minor->dev;
2360 struct drm_crtc *crtc = &intel_crtc->base;
2361 struct intel_connector *intel_connector;
2362 struct drm_encoder *encoder;
2363
2364 encoder = &intel_encoder->base;
2365 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2366 encoder->base.id, encoder->name);
2367 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2368 struct drm_connector *connector = &intel_connector->base;
2369 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2370 connector->base.id,
2371 connector->name,
2372 drm_get_connector_status_name(connector->status));
2373 if (connector->status == connector_status_connected) {
2374 struct drm_display_mode *mode = &crtc->mode;
2375 seq_printf(m, ", mode:\n");
2376 intel_seq_print_mode(m, 2, mode);
2377 } else {
2378 seq_putc(m, '\n');
2379 }
2380 }
2381 }
2382
2383 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2384 {
2385 struct drm_info_node *node = m->private;
2386 struct drm_device *dev = node->minor->dev;
2387 struct drm_crtc *crtc = &intel_crtc->base;
2388 struct intel_encoder *intel_encoder;
2389
2390 if (crtc->primary->fb)
2391 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2392 crtc->primary->fb->base.id, crtc->x, crtc->y,
2393 crtc->primary->fb->width, crtc->primary->fb->height);
2394 else
2395 seq_puts(m, "\tprimary plane disabled\n");
2396 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2397 intel_encoder_info(m, intel_crtc, intel_encoder);
2398 }
2399
2400 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2401 {
2402 struct drm_display_mode *mode = panel->fixed_mode;
2403
2404 seq_printf(m, "\tfixed mode:\n");
2405 intel_seq_print_mode(m, 2, mode);
2406 }
2407
2408 static void intel_dp_info(struct seq_file *m,
2409 struct intel_connector *intel_connector)
2410 {
2411 struct intel_encoder *intel_encoder = intel_connector->encoder;
2412 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2413
2414 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2415 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2416 "no");
2417 if (intel_encoder->type == INTEL_OUTPUT_EDP)
2418 intel_panel_info(m, &intel_connector->panel);
2419 }
2420
2421 static void intel_hdmi_info(struct seq_file *m,
2422 struct intel_connector *intel_connector)
2423 {
2424 struct intel_encoder *intel_encoder = intel_connector->encoder;
2425 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2426
2427 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2428 "no");
2429 }
2430
2431 static void intel_lvds_info(struct seq_file *m,
2432 struct intel_connector *intel_connector)
2433 {
2434 intel_panel_info(m, &intel_connector->panel);
2435 }
2436
2437 static void intel_connector_info(struct seq_file *m,
2438 struct drm_connector *connector)
2439 {
2440 struct intel_connector *intel_connector = to_intel_connector(connector);
2441 struct intel_encoder *intel_encoder = intel_connector->encoder;
2442 struct drm_display_mode *mode;
2443
2444 seq_printf(m, "connector %d: type %s, status: %s\n",
2445 connector->base.id, connector->name,
2446 drm_get_connector_status_name(connector->status));
2447 if (connector->status == connector_status_connected) {
2448 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2449 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2450 connector->display_info.width_mm,
2451 connector->display_info.height_mm);
2452 seq_printf(m, "\tsubpixel order: %s\n",
2453 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2454 seq_printf(m, "\tCEA rev: %d\n",
2455 connector->display_info.cea_rev);
2456 }
2457 if (intel_encoder) {
2458 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2459 intel_encoder->type == INTEL_OUTPUT_EDP)
2460 intel_dp_info(m, intel_connector);
2461 else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2462 intel_hdmi_info(m, intel_connector);
2463 else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2464 intel_lvds_info(m, intel_connector);
2465 }
2466
2467 seq_printf(m, "\tmodes:\n");
2468 list_for_each_entry(mode, &connector->modes, head)
2469 intel_seq_print_mode(m, 2, mode);
2470 }
2471
2472 static bool cursor_active(struct drm_device *dev, int pipe)
2473 {
2474 struct drm_i915_private *dev_priv = dev->dev_private;
2475 u32 state;
2476
2477 if (IS_845G(dev) || IS_I865G(dev))
2478 state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2479 else
2480 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2481
2482 return state;
2483 }
2484
2485 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2486 {
2487 struct drm_i915_private *dev_priv = dev->dev_private;
2488 u32 pos;
2489
2490 pos = I915_READ(CURPOS(pipe));
2491
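/*
 * CURPOS packs X and Y as sign-magnitude fields: the magnitude is
 * extracted first, then negated when the per-axis sign bit is set.
 */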
2492 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2493 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2494 *x = -*x;
2495
2496 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2497 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2498 *y = -*y;
2499
2500 return cursor_active(dev, pipe);
2501 }
2502
2503 static int i915_display_info(struct seq_file *m, void *unused)
2504 {
2505 struct drm_info_node *node = m->private;
2506 struct drm_device *dev = node->minor->dev;
2507 struct drm_i915_private *dev_priv = dev->dev_private;
2508 struct intel_crtc *crtc;
2509 struct drm_connector *connector;
2510
2511 intel_runtime_pm_get(dev_priv);
2512 drm_modeset_lock_all(dev);
2513 seq_printf(m, "CRTC info\n");
2514 seq_printf(m, "---------\n");
2515 for_each_intel_crtc(dev, crtc) {
2516 bool active;
2517 int x, y;
2518
2519 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
2520 crtc->base.base.id, pipe_name(crtc->pipe),
2521 yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
2522 if (crtc->active) {
2523 intel_crtc_info(m, crtc);
2524
2525 active = cursor_position(dev, crtc->pipe, &x, &y);
2526 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
2527 yesno(crtc->cursor_base),
2528 x, y, crtc->cursor_width, crtc->cursor_height,
2529 crtc->cursor_addr, yesno(active));
2530 }
2531
2532 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2533 yesno(!crtc->cpu_fifo_underrun_disabled),
2534 yesno(!crtc->pch_fifo_underrun_disabled));
2535 }
2536
2537 seq_printf(m, "\n");
2538 seq_printf(m, "Connector info\n");
2539 seq_printf(m, "--------------\n");
2540 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2541 intel_connector_info(m, connector);
2542 }
2543 drm_modeset_unlock_all(dev);
2544 intel_runtime_pm_put(dev_priv);
2545
2546 return 0;
2547 }
2548
2549 static int i915_semaphore_status(struct seq_file *m, void *unused)
2550 {
2551 struct drm_info_node *node = (struct drm_info_node *) m->private;
2552 struct drm_device *dev = node->minor->dev;
2553 struct drm_i915_private *dev_priv = dev->dev_private;
2554 struct intel_engine_cs *ring;
2555 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
2556 int i, j, ret;
2557
2558 if (!i915_semaphore_is_enabled(dev)) {
2559 seq_puts(m, "Semaphores are disabled\n");
2560 return 0;
2561 }
2562
2563 ret = mutex_lock_interruptible(&dev->struct_mutex);
2564 if (ret)
2565 return ret;
2566 intel_runtime_pm_get(dev_priv);
2567
2568 if (IS_BROADWELL(dev)) {
2569 struct page *page;
2570 uint64_t *seqno;
2571
2572 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
2573
2574 seqno = (uint64_t *)kmap_atomic(page);
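/*
 * The dump below assumes the BDW semaphore page is laid out as an
 * I915_NUM_RINGS x I915_NUM_RINGS matrix of 64-bit values: index
 * i * I915_NUM_RINGS + j holds the seqno ring i last signalled towards
 * ring j, while the transposed index i + j * I915_NUM_RINGS gives the
 * value ring i last waited on from ring j.
 */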
2575 for_each_ring(ring, dev_priv, i) {
2576 uint64_t offset;
2577
2578 seq_printf(m, "%s\n", ring->name);
2579
2580 seq_puts(m, " Last signal:");
2581 for (j = 0; j < num_rings; j++) {
2582 offset = i * I915_NUM_RINGS + j;
2583 seq_printf(m, "0x%08llx (0x%02llx) ",
2584 seqno[offset], offset * 8);
2585 }
2586 seq_putc(m, '\n');
2587
2588 seq_puts(m, " Last wait: ");
2589 for (j = 0; j < num_rings; j++) {
2590 offset = i + (j * I915_NUM_RINGS);
2591 seq_printf(m, "0x%08llx (0x%02llx) ",
2592 seqno[offset], offset * 8);
2593 }
2594 seq_putc(m, '\n');
2595
2596 }
2597 kunmap_atomic(seqno);
2598 } else {
2599 seq_puts(m, " Last signal:");
2600 for_each_ring(ring, dev_priv, i)
2601 for (j = 0; j < num_rings; j++)
2602 seq_printf(m, "0x%08x\n",
2603 I915_READ(ring->semaphore.mbox.signal[j]));
2604 seq_putc(m, '\n');
2605 }
2606
2607 seq_puts(m, "\nSync seqno:\n");
2608 for_each_ring(ring, dev_priv, i) {
2609 for (j = 0; j < num_rings; j++) {
2610 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
2611 }
2612 seq_putc(m, '\n');
2613 }
2614 seq_putc(m, '\n');
2615
2616 intel_runtime_pm_put(dev_priv);
2617 mutex_unlock(&dev->struct_mutex);
2618 return 0;
2619 }
2620
2621 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2622 {
2623 struct drm_info_node *node = (struct drm_info_node *) m->private;
2624 struct drm_device *dev = node->minor->dev;
2625 struct drm_i915_private *dev_priv = dev->dev_private;
2626 int i;
2627
2628 drm_modeset_lock_all(dev);
2629 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2630 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2631
2632 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2633 seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
2634 pll->active, yesno(pll->on));
2635 seq_printf(m, " tracked hardware state:\n");
2636 seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
2637 seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
2638 seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
2639 seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
2640 seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
2641 }
2642 drm_modeset_unlock_all(dev);
2643
2644 return 0;
2645 }
2646
2647 static int i915_wa_registers(struct seq_file *m, void *unused)
2648 {
2649 int i;
2650 int ret;
2651 struct drm_info_node *node = (struct drm_info_node *) m->private;
2652 struct drm_device *dev = node->minor->dev;
2653 struct drm_i915_private *dev_priv = dev->dev_private;
2654
2655 ret = mutex_lock_interruptible(&dev->struct_mutex);
2656 if (ret)
2657 return ret;
2658
2659 intel_runtime_pm_get(dev_priv);
2660
2661 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
2662 for (i = 0; i < dev_priv->workarounds.count; ++i) {
2663 u32 addr, mask, value, read;
2664 bool ok;
2665
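/*
 * Each saved workaround is re-read from the hardware and reported as
 * "OK" only when the bits selected by its mask still match the value
 * that was originally programmed.
 */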
2666 addr = dev_priv->workarounds.reg[i].addr;
2667 mask = dev_priv->workarounds.reg[i].mask;
2668 value = dev_priv->workarounds.reg[i].value;
2669 read = I915_READ(addr);
2670 ok = (value & mask) == (read & mask);
2671 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
2672 addr, value, mask, read, ok ? "OK" : "FAIL");
2673 }
2674
2675 intel_runtime_pm_put(dev_priv);
2676 mutex_unlock(&dev->struct_mutex);
2677
2678 return 0;
2679 }
2680
2681 struct pipe_crc_info {
2682 const char *name;
2683 struct drm_device *dev;
2684 enum pipe pipe;
2685 };
2686
2687 static int i915_dp_mst_info(struct seq_file *m, void *unused)
2688 {
2689 struct drm_info_node *node = (struct drm_info_node *) m->private;
2690 struct drm_device *dev = node->minor->dev;
2691 struct drm_encoder *encoder;
2692 struct intel_encoder *intel_encoder;
2693 struct intel_digital_port *intel_dig_port;
2694 drm_modeset_lock_all(dev);
2695 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2696 intel_encoder = to_intel_encoder(encoder);
2697 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
2698 continue;
2699 intel_dig_port = enc_to_dig_port(encoder);
2700 if (!intel_dig_port->dp.can_mst)
2701 continue;
2702
2703 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
2704 }
2705 drm_modeset_unlock_all(dev);
2706 return 0;
2707 }
2708
2709 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
2710 {
2711 struct pipe_crc_info *info = inode->i_private;
2712 struct drm_i915_private *dev_priv = info->dev->dev_private;
2713 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2714
2715 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
2716 return -ENODEV;
2717
2718 spin_lock_irq(&pipe_crc->lock);
2719
2720 if (pipe_crc->opened) {
2721 spin_unlock_irq(&pipe_crc->lock);
2722 return -EBUSY; /* already open */
2723 }
2724
2725 pipe_crc->opened = true;
2726 filep->private_data = inode->i_private;
2727
2728 spin_unlock_irq(&pipe_crc->lock);
2729
2730 return 0;
2731 }
2732
2733 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
2734 {
2735 struct pipe_crc_info *info = inode->i_private;
2736 struct drm_i915_private *dev_priv = info->dev->dev_private;
2737 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2738
2739 spin_lock_irq(&pipe_crc->lock);
2740 pipe_crc->opened = false;
2741 spin_unlock_irq(&pipe_crc->lock);
2742
2743 return 0;
2744 }
2745
2746 /* (6 fields, 8 chars each, space separated (5) + '\n') */
2747 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
2748 /* account for '\0' */
2749 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
2750
2751 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
2752 {
2753 assert_spin_locked(&pipe_crc->lock);
2754 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
2755 INTEL_PIPE_CRC_ENTRIES_NR);
2756 }
2757
2758 static ssize_t
2759 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
2760 loff_t *pos)
2761 {
2762 struct pipe_crc_info *info = filep->private_data;
2763 struct drm_device *dev = info->dev;
2764 struct drm_i915_private *dev_priv = dev->dev_private;
2765 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2766 char buf[PIPE_CRC_BUFFER_LEN];
2767 int head, tail, n_entries, n;
2768 ssize_t bytes_read;
2769
2770 /*
2771 * Don't allow user space to provide buffers not big enough to hold
2772 * a line of data.
2773 */
2774 if (count < PIPE_CRC_LINE_LEN)
2775 return -EINVAL;
2776
2777 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
2778 return 0;
2779
2780 /* nothing to read */
2781 spin_lock_irq(&pipe_crc->lock);
2782 while (pipe_crc_data_count(pipe_crc) == 0) {
2783 int ret;
2784
2785 if (filep->f_flags & O_NONBLOCK) {
2786 spin_unlock_irq(&pipe_crc->lock);
2787 return -EAGAIN;
2788 }
2789
2790 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
2791 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
2792 if (ret) {
2793 spin_unlock_irq(&pipe_crc->lock);
2794 return ret;
2795 }
2796 }
2797
2798 /* We now have one or more entries to read */
2799 head = pipe_crc->head;
2800 tail = pipe_crc->tail;
2801 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
2802 count / PIPE_CRC_LINE_LEN);
2803 spin_unlock_irq(&pipe_crc->lock);
2804
2805 bytes_read = 0;
2806 n = 0;
2807 do {
2808 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
2809 int ret;
2810
2811 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
2812 "%8u %8x %8x %8x %8x %8x\n",
2813 entry->frame, entry->crc[0],
2814 entry->crc[1], entry->crc[2],
2815 entry->crc[3], entry->crc[4]);
2816
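/*
 * Note on the check below: copy_to_user() returns the number of bytes
 * it could not copy, so comparing against PIPE_CRC_LINE_LEN only
 * catches a complete failure; a partially copied line is silently
 * counted as read. A stricter variant would treat any non-zero return
 * as -EFAULT.
 */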
2817 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
2818 buf, PIPE_CRC_LINE_LEN);
2819 if (ret == PIPE_CRC_LINE_LEN)
2820 return -EFAULT;
2821
2822 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
2823 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
2824 n++;
2825 } while (--n_entries);
2826
2827 spin_lock_irq(&pipe_crc->lock);
2828 pipe_crc->tail = tail;
2829 spin_unlock_irq(&pipe_crc->lock);
2830
2831 return bytes_read;
2832 }
2833
2834 static const struct file_operations i915_pipe_crc_fops = {
2835 .owner = THIS_MODULE,
2836 .open = i915_pipe_crc_open,
2837 .read = i915_pipe_crc_read,
2838 .release = i915_pipe_crc_release,
2839 };
2840
2841 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
2842 {
2843 .name = "i915_pipe_A_crc",
2844 .pipe = PIPE_A,
2845 },
2846 {
2847 .name = "i915_pipe_B_crc",
2848 .pipe = PIPE_B,
2849 },
2850 {
2851 .name = "i915_pipe_C_crc",
2852 .pipe = PIPE_C,
2853 },
2854 };
2855
2856 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
2857 enum pipe pipe)
2858 {
2859 struct drm_device *dev = minor->dev;
2860 struct dentry *ent;
2861 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
2862
2863 info->dev = dev;
2864 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2865 &i915_pipe_crc_fops);
2866 if (!ent)
2867 return -ENOMEM;
2868
2869 return drm_add_fake_info_node(minor, ent, info);
2870 }
2871
2872 static const char * const pipe_crc_sources[] = {
2873 "none",
2874 "plane1",
2875 "plane2",
2876 "pf",
2877 "pipe",
2878 "TV",
2879 "DP-B",
2880 "DP-C",
2881 "DP-D",
2882 "auto",
2883 };
2884
2885 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
2886 {
2887 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
2888 return pipe_crc_sources[source];
2889 }
2890
2891 static int display_crc_ctl_show(struct seq_file *m, void *data)
2892 {
2893 struct drm_device *dev = m->private;
2894 struct drm_i915_private *dev_priv = dev->dev_private;
2895 int i;
2896
2897 for (i = 0; i < I915_MAX_PIPES; i++)
2898 seq_printf(m, "%c %s\n", pipe_name(i),
2899 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
2900
2901 return 0;
2902 }
2903
2904 static int display_crc_ctl_open(struct inode *inode, struct file *file)
2905 {
2906 struct drm_device *dev = inode->i_private;
2907
2908 return single_open(file, display_crc_ctl_show, dev);
2909 }
2910
2911 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2912 uint32_t *val)
2913 {
2914 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2915 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2916
2917 switch (*source) {
2918 case INTEL_PIPE_CRC_SOURCE_PIPE:
2919 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
2920 break;
2921 case INTEL_PIPE_CRC_SOURCE_NONE:
2922 *val = 0;
2923 break;
2924 default:
2925 return -EINVAL;
2926 }
2927
2928 return 0;
2929 }
2930
2931 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2932 enum intel_pipe_crc_source *source)
2933 {
2934 struct intel_encoder *encoder;
2935 struct intel_crtc *crtc;
2936 struct intel_digital_port *dig_port;
2937 int ret = 0;
2938
2939 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2940
2941 drm_modeset_lock_all(dev);
2942 for_each_intel_encoder(dev, encoder) {
2943 if (!encoder->base.crtc)
2944 continue;
2945
2946 crtc = to_intel_crtc(encoder->base.crtc);
2947
2948 if (crtc->pipe != pipe)
2949 continue;
2950
2951 switch (encoder->type) {
2952 case INTEL_OUTPUT_TVOUT:
2953 *source = INTEL_PIPE_CRC_SOURCE_TV;
2954 break;
2955 case INTEL_OUTPUT_DISPLAYPORT:
2956 case INTEL_OUTPUT_EDP:
2957 dig_port = enc_to_dig_port(&encoder->base);
2958 switch (dig_port->port) {
2959 case PORT_B:
2960 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2961 break;
2962 case PORT_C:
2963 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2964 break;
2965 case PORT_D:
2966 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2967 break;
2968 default:
2969 WARN(1, "nonexisting DP port %c\n",
2970 port_name(dig_port->port));
2971 break;
2972 }
2973 break;
2974 }
2975 }
2976 drm_modeset_unlock_all(dev);
2977
2978 return ret;
2979 }
2980
2981 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2982 enum pipe pipe,
2983 enum intel_pipe_crc_source *source,
2984 uint32_t *val)
2985 {
2986 struct drm_i915_private *dev_priv = dev->dev_private;
2987 bool need_stable_symbols = false;
2988
2989 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2990 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2991 if (ret)
2992 return ret;
2993 }
2994
2995 switch (*source) {
2996 case INTEL_PIPE_CRC_SOURCE_PIPE:
2997 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2998 break;
2999 case INTEL_PIPE_CRC_SOURCE_DP_B:
3000 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3001 need_stable_symbols = true;
3002 break;
3003 case INTEL_PIPE_CRC_SOURCE_DP_C:
3004 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3005 need_stable_symbols = true;
3006 break;
3007 case INTEL_PIPE_CRC_SOURCE_NONE:
3008 *val = 0;
3009 break;
3010 default:
3011 return -EINVAL;
3012 }
3013
3014 /*
3015 * When the pipe CRC tap point is after the transcoders we need
3016 * to tweak symbol-level features to produce a deterministic series of
3017 * symbols for a given frame. We need to reset those features only once
3018 * a frame (instead of every nth symbol):
3019 * - DC-balance: used to ensure a better clock recovery from the data
3020 * link (SDVO)
3021 * - DisplayPort scrambling: used for EMI reduction
3022 */
3023 if (need_stable_symbols) {
3024 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3025
3026 tmp |= DC_BALANCE_RESET_VLV;
3027 if (pipe == PIPE_A)
3028 tmp |= PIPE_A_SCRAMBLE_RESET;
3029 else
3030 tmp |= PIPE_B_SCRAMBLE_RESET;
3031
3032 I915_WRITE(PORT_DFT2_G4X, tmp);
3033 }
3034
3035 return 0;
3036 }
3037
3038 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3039 enum pipe pipe,
3040 enum intel_pipe_crc_source *source,
3041 uint32_t *val)
3042 {
3043 struct drm_i915_private *dev_priv = dev->dev_private;
3044 bool need_stable_symbols = false;
3045
3046 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3047 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3048 if (ret)
3049 return ret;
3050 }
3051
3052 switch (*source) {
3053 case INTEL_PIPE_CRC_SOURCE_PIPE:
3054 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3055 break;
3056 case INTEL_PIPE_CRC_SOURCE_TV:
3057 if (!SUPPORTS_TV(dev))
3058 return -EINVAL;
3059 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3060 break;
3061 case INTEL_PIPE_CRC_SOURCE_DP_B:
3062 if (!IS_G4X(dev))
3063 return -EINVAL;
3064 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3065 need_stable_symbols = true;
3066 break;
3067 case INTEL_PIPE_CRC_SOURCE_DP_C:
3068 if (!IS_G4X(dev))
3069 return -EINVAL;
3070 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3071 need_stable_symbols = true;
3072 break;
3073 case INTEL_PIPE_CRC_SOURCE_DP_D:
3074 if (!IS_G4X(dev))
3075 return -EINVAL;
3076 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3077 need_stable_symbols = true;
3078 break;
3079 case INTEL_PIPE_CRC_SOURCE_NONE:
3080 *val = 0;
3081 break;
3082 default:
3083 return -EINVAL;
3084 }
3085
3086 /*
3087 * When the pipe CRC tap point is after the transcoders we need
3088 * to tweak symbol-level features to produce a deterministic series of
3089 * symbols for a given frame. We need to reset those features only once
3090 * a frame (instead of every nth symbol):
3091 * - DC-balance: used to ensure a better clock recovery from the data
3092 * link (SDVO)
3093 * - DisplayPort scrambling: used for EMI reduction
3094 */
3095 if (need_stable_symbols) {
3096 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3097
3098 WARN_ON(!IS_G4X(dev));
3099
3100 I915_WRITE(PORT_DFT_I9XX,
3101 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3102
3103 if (pipe == PIPE_A)
3104 tmp |= PIPE_A_SCRAMBLE_RESET;
3105 else
3106 tmp |= PIPE_B_SCRAMBLE_RESET;
3107
3108 I915_WRITE(PORT_DFT2_G4X, tmp);
3109 }
3110
3111 return 0;
3112 }
3113
3114 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3115 enum pipe pipe)
3116 {
3117 struct drm_i915_private *dev_priv = dev->dev_private;
3118 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3119
3120 if (pipe == PIPE_A)
3121 tmp &= ~PIPE_A_SCRAMBLE_RESET;
3122 else
3123 tmp &= ~PIPE_B_SCRAMBLE_RESET;
3124 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3125 tmp &= ~DC_BALANCE_RESET_VLV;
3126 I915_WRITE(PORT_DFT2_G4X, tmp);
3127
3128 }
3129
3130 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3131 enum pipe pipe)
3132 {
3133 struct drm_i915_private *dev_priv = dev->dev_private;
3134 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3135
3136 if (pipe == PIPE_A)
3137 tmp &= ~PIPE_A_SCRAMBLE_RESET;
3138 else
3139 tmp &= ~PIPE_B_SCRAMBLE_RESET;
3140 I915_WRITE(PORT_DFT2_G4X, tmp);
3141
3142 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3143 I915_WRITE(PORT_DFT_I9XX,
3144 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3145 }
3146 }
3147
3148 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3149 uint32_t *val)
3150 {
3151 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3152 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
3153
3154 switch (*source) {
3155 case INTEL_PIPE_CRC_SOURCE_PLANE1:
3156 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3157 break;
3158 case INTEL_PIPE_CRC_SOURCE_PLANE2:
3159 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3160 break;
3161 case INTEL_PIPE_CRC_SOURCE_PIPE:
3162 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3163 break;
3164 case INTEL_PIPE_CRC_SOURCE_NONE:
3165 *val = 0;
3166 break;
3167 default:
3168 return -EINVAL;
3169 }
3170
3171 return 0;
3172 }
3173
3174 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3175 {
3176 struct drm_i915_private *dev_priv = dev->dev_private;
3177 struct intel_crtc *crtc =
3178 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3179
3180 drm_modeset_lock_all(dev);
3181 /*
3182 * If we use the eDP transcoder we need to make sure that we don't
3183 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
3184 * relevant on hsw with pipe A when using the always-on power well
3185 * routing.
3186 */
3187 if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
3188 !crtc->config.pch_pfit.enabled) {
3189 crtc->config.pch_pfit.force_thru = true;
3190
3191 intel_display_power_get(dev_priv,
3192 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3193
3194 dev_priv->display.crtc_disable(&crtc->base);
3195 dev_priv->display.crtc_enable(&crtc->base);
3196 }
3197 drm_modeset_unlock_all(dev);
3198 }
3199
3200 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3201 {
3202 struct drm_i915_private *dev_priv = dev->dev_private;
3203 struct intel_crtc *crtc =
3204 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3205
3206 drm_modeset_lock_all(dev);
3207 /*
3208 * If we use the eDP transcoder we need to make sure that we don't
3209 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
3210 * relevant on hsw with pipe A when using the always-on power well
3211 * routing.
3212 */
3213 if (crtc->config.pch_pfit.force_thru) {
3214 crtc->config.pch_pfit.force_thru = false;
3215
3216 dev_priv->display.crtc_disable(&crtc->base);
3217 dev_priv->display.crtc_enable(&crtc->base);
3218
3219 intel_display_power_put(dev_priv,
3220 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3221 }
3222 drm_modeset_unlock_all(dev);
3223 }
3224
3225 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3226 enum pipe pipe,
3227 enum intel_pipe_crc_source *source,
3228 uint32_t *val)
3229 {
3230 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3231 *source = INTEL_PIPE_CRC_SOURCE_PF;
3232
3233 switch (*source) {
3234 case INTEL_PIPE_CRC_SOURCE_PLANE1:
3235 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3236 break;
3237 case INTEL_PIPE_CRC_SOURCE_PLANE2:
3238 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3239 break;
3240 case INTEL_PIPE_CRC_SOURCE_PF:
3241 if (IS_HASWELL(dev) && pipe == PIPE_A)
3242 hsw_trans_edp_pipe_A_crc_wa(dev);
3243
3244 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3245 break;
3246 case INTEL_PIPE_CRC_SOURCE_NONE:
3247 *val = 0;
3248 break;
3249 default:
3250 return -EINVAL;
3251 }
3252
3253 return 0;
3254 }
3255
3256 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3257 enum intel_pipe_crc_source source)
3258 {
3259 struct drm_i915_private *dev_priv = dev->dev_private;
3260 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3261 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3262 pipe));
3263 u32 val = 0; /* shut up gcc */
3264 int ret;
3265
3266 if (pipe_crc->source == source)
3267 return 0;
3268
3269 /* forbid changing the source without going back to 'none' */
3270 if (pipe_crc->source && source)
3271 return -EINVAL;
3272
3273 if (IS_GEN2(dev))
3274 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
3275 else if (INTEL_INFO(dev)->gen < 5)
3276 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3277 else if (IS_VALLEYVIEW(dev))
3278 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3279 else if (IS_GEN5(dev) || IS_GEN6(dev))
3280 ret = ilk_pipe_crc_ctl_reg(&source, &val);
3281 else
3282 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3283
3284 if (ret != 0)
3285 return ret;
3286
3287 /* none -> real source transition */
3288 if (source) {
3289 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
3290 pipe_name(pipe), pipe_crc_source_name(source));
3291
3292 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
3293 INTEL_PIPE_CRC_ENTRIES_NR,
3294 GFP_KERNEL);
3295 if (!pipe_crc->entries)
3296 return -ENOMEM;
3297
3298 /*
3299 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
3300 * enabled and disabled dynamically based on package C states,
3301 * user space can't make reliable use of the CRCs, so let's just
3302 * completely disable it.
3303 */
3304 hsw_disable_ips(crtc);
3305
3306 spin_lock_irq(&pipe_crc->lock);
3307 pipe_crc->head = 0;
3308 pipe_crc->tail = 0;
3309 spin_unlock_irq(&pipe_crc->lock);
3310 }
3311
3312 pipe_crc->source = source;
3313
3314 I915_WRITE(PIPE_CRC_CTL(pipe), val);
3315 POSTING_READ(PIPE_CRC_CTL(pipe));
3316
3317 /* real source -> none transition */
3318 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
3319 struct intel_pipe_crc_entry *entries;
3320 struct intel_crtc *crtc =
3321 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
3322
3323 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
3324 pipe_name(pipe));
3325
3326 drm_modeset_lock(&crtc->base.mutex, NULL);
3327 if (crtc->active)
3328 intel_wait_for_vblank(dev, pipe);
3329 drm_modeset_unlock(&crtc->base.mutex);
3330
3331 spin_lock_irq(&pipe_crc->lock);
3332 entries = pipe_crc->entries;
3333 pipe_crc->entries = NULL;
3334 spin_unlock_irq(&pipe_crc->lock);
3335
3336 kfree(entries);
3337
3338 if (IS_G4X(dev))
3339 g4x_undo_pipe_scramble_reset(dev, pipe);
3340 else if (IS_VALLEYVIEW(dev))
3341 vlv_undo_pipe_scramble_reset(dev, pipe);
3342 else if (IS_HASWELL(dev) && pipe == PIPE_A)
3343 hsw_undo_trans_edp_pipe_A_crc_wa(dev);
3344
3345 hsw_enable_ips(crtc);
3346 }
3347
3348 return 0;
3349 }
3350
3351 /*
3352 * Parse pipe CRC command strings:
3353 * command: wsp* object wsp+ name wsp+ source wsp*
3354 * object: 'pipe'
3355 * name: (A | B | C)
3356 * source: (none | plane1 | plane2 | pf)
3357 * wsp: (#0x20 | #0x9 | #0xA)+
3358 *
3359 * eg.:
3360 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
3361 * "pipe A none" -> Stop CRC
3362 */
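/*
 * Illustrative usage, assuming the control file is exposed as
 * <debugfs>/dri/<minor>/i915_display_crc_ctl (the name used when the
 * file is registered elsewhere in this file):
 *
 *   echo "pipe A pipe" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */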
3363 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
3364 {
3365 int n_words = 0;
3366
3367 while (*buf) {
3368 char *end;
3369
3370 /* skip leading white space */
3371 buf = skip_spaces(buf);
3372 if (!*buf)
3373 break; /* end of buffer */
3374
3375 /* find end of word */
3376 for (end = buf; *end && !isspace(*end); end++)
3377 ;
3378
3379 if (n_words == max_words) {
3380 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
3381 max_words);
3382 return -EINVAL; /* ran out of words[] before bytes */
3383 }
3384
3385 if (*end)
3386 *end++ = '\0';
3387 words[n_words++] = buf;
3388 buf = end;
3389 }
3390
3391 return n_words;
3392 }
3393
3394 enum intel_pipe_crc_object {
3395 PIPE_CRC_OBJECT_PIPE,
3396 };
3397
3398 static const char * const pipe_crc_objects[] = {
3399 "pipe",
3400 };
3401
3402 static int
3403 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3404 {
3405 int i;
3406
3407 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3408 if (!strcmp(buf, pipe_crc_objects[i])) {
3409 *o = i;
3410 return 0;
3411 }
3412
3413 return -EINVAL;
3414 }
3415
3416 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3417 {
3418 const char name = buf[0];
3419
3420 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3421 return -EINVAL;
3422
3423 *pipe = name - 'A';
3424
3425 return 0;
3426 }
3427
3428 static int
3429 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3430 {
3431 int i;
3432
3433 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3434 if (!strcmp(buf, pipe_crc_sources[i])) {
3435 *s = i;
3436 return 0;
3437 }
3438
3439 return -EINVAL;
3440 }
3441
3442 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3443 {
3444 #define N_WORDS 3
3445 int n_words;
3446 char *words[N_WORDS];
3447 enum pipe pipe;
3448 enum intel_pipe_crc_object object;
3449 enum intel_pipe_crc_source source;
3450
3451 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3452 if (n_words != N_WORDS) {
3453 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3454 N_WORDS);
3455 return -EINVAL;
3456 }
3457
3458 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3459 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3460 return -EINVAL;
3461 }
3462
3463 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3464 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3465 return -EINVAL;
3466 }
3467
3468 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3469 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3470 return -EINVAL;
3471 }
3472
3473 return pipe_crc_set_source(dev, pipe, source);
3474 }
3475
3476 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3477 size_t len, loff_t *offp)
3478 {
3479 struct seq_file *m = file->private_data;
3480 struct drm_device *dev = m->private;
3481 char *tmpbuf;
3482 int ret;
3483
3484 if (len == 0)
3485 return 0;
3486
3487 if (len > PAGE_SIZE - 1) {
3488 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3489 PAGE_SIZE);
3490 return -E2BIG;
3491 }
3492
3493 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3494 if (!tmpbuf)
3495 return -ENOMEM;
3496
3497 if (copy_from_user(tmpbuf, ubuf, len)) {
3498 ret = -EFAULT;
3499 goto out;
3500 }
3501 tmpbuf[len] = '\0';
3502
3503 ret = display_crc_ctl_parse(dev, tmpbuf, len);
3504
3505 out:
3506 kfree(tmpbuf);
3507 if (ret < 0)
3508 return ret;
3509
3510 *offp += len;
3511 return len;
3512 }
3513
3514 static const struct file_operations i915_display_crc_ctl_fops = {
3515 .owner = THIS_MODULE,
3516 .open = display_crc_ctl_open,
3517 .read = seq_read,
3518 .llseek = seq_lseek,
3519 .release = single_release,
3520 .write = display_crc_ctl_write
3521 };
3522
3523 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
3524 {
3525 struct drm_device *dev = m->private;
3526 int num_levels = ilk_wm_max_level(dev) + 1;
3527 int level;
3528
3529 drm_modeset_lock_all(dev);
3530
3531 for (level = 0; level < num_levels; level++) {
3532 unsigned int latency = wm[level];
3533
3534 /* WM1+ latency values in 0.5us units */
3535 if (level > 0)
3536 latency *= 5;
3537
3538 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3539 level, wm[level],
3540 latency / 10, latency % 10);
3541 }
3542
3543 drm_modeset_unlock_all(dev);
3544 }
3545
3546 static int pri_wm_latency_show(struct seq_file *m, void *data)
3547 {
3548 struct drm_device *dev = m->private;
3549
3550 wm_latency_show(m, to_i915(dev)->wm.pri_latency);
3551
3552 return 0;
3553 }
3554
3555 static int spr_wm_latency_show(struct seq_file *m, void *data)
3556 {
3557 struct drm_device *dev = m->private;
3558
3559 wm_latency_show(m, to_i915(dev)->wm.spr_latency);
3560
3561 return 0;
3562 }
3563
3564 static int cur_wm_latency_show(struct seq_file *m, void *data)
3565 {
3566 struct drm_device *dev = m->private;
3567
3568 wm_latency_show(m, to_i915(dev)->wm.cur_latency);
3569
3570 return 0;
3571 }
3572
3573 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3574 {
3575 struct drm_device *dev = inode->i_private;
3576
3577 if (HAS_GMCH_DISPLAY(dev))
3578 return -ENODEV;
3579
3580 return single_open(file, pri_wm_latency_show, dev);
3581 }
3582
3583 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3584 {
3585 struct drm_device *dev = inode->i_private;
3586
3587 if (HAS_GMCH_DISPLAY(dev))
3588 return -ENODEV;
3589
3590 return single_open(file, spr_wm_latency_show, dev);
3591 }
3592
3593 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3594 {
3595 struct drm_device *dev = inode->i_private;
3596
3597 if (HAS_GMCH_DISPLAY(dev))
3598 return -ENODEV;
3599
3600 return single_open(file, cur_wm_latency_show, dev);
3601 }
3602
3603 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3604 size_t len, loff_t *offp, uint16_t wm[5])
3605 {
3606 struct seq_file *m = file->private_data;
3607 struct drm_device *dev = m->private;
3608 uint16_t new[5] = { 0 };
3609 int num_levels = ilk_wm_max_level(dev) + 1;
3610 int level;
3611 int ret;
3612 char tmp[32];
3613
3614 if (len >= sizeof(tmp))
3615 return -EINVAL;
3616
3617 if (copy_from_user(tmp, ubuf, len))
3618 return -EFAULT;
3619
3620 tmp[len] = '\0';
3621
3622 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
3623 if (ret != num_levels)
3624 return -EINVAL;
3625
3626 drm_modeset_lock_all(dev);
3627
3628 for (level = 0; level < num_levels; level++)
3629 wm[level] = new[level];
3630
3631 drm_modeset_unlock_all(dev);
3632
3633 return len;
3634 }
3635
3636
3637 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3638 size_t len, loff_t *offp)
3639 {
3640 struct seq_file *m = file->private_data;
3641 struct drm_device *dev = m->private;
3642
3643 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
3644 }
3645
3646 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3647 size_t len, loff_t *offp)
3648 {
3649 struct seq_file *m = file->private_data;
3650 struct drm_device *dev = m->private;
3651
3652 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
3653 }
3654
3655 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3656 size_t len, loff_t *offp)
3657 {
3658 struct seq_file *m = file->private_data;
3659 struct drm_device *dev = m->private;
3660
3661 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
3662 }
3663
3664 static const struct file_operations i915_pri_wm_latency_fops = {
3665 .owner = THIS_MODULE,
3666 .open = pri_wm_latency_open,
3667 .read = seq_read,
3668 .llseek = seq_lseek,
3669 .release = single_release,
3670 .write = pri_wm_latency_write
3671 };
3672
3673 static const struct file_operations i915_spr_wm_latency_fops = {
3674 .owner = THIS_MODULE,
3675 .open = spr_wm_latency_open,
3676 .read = seq_read,
3677 .llseek = seq_lseek,
3678 .release = single_release,
3679 .write = spr_wm_latency_write
3680 };
3681
3682 static const struct file_operations i915_cur_wm_latency_fops = {
3683 .owner = THIS_MODULE,
3684 .open = cur_wm_latency_open,
3685 .read = seq_read,
3686 .llseek = seq_lseek,
3687 .release = single_release,
3688 .write = cur_wm_latency_write
3689 };
3690
3691 static int
3692 i915_wedged_get(void *data, u64 *val)
3693 {
3694 struct drm_device *dev = data;
3695 struct drm_i915_private *dev_priv = dev->dev_private;
3696
3697 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
3698
3699 return 0;
3700 }
3701
3702 static int
3703 i915_wedged_set(void *data, u64 val)
3704 {
3705 struct drm_device *dev = data;
3706 struct drm_i915_private *dev_priv = dev->dev_private;
3707
3708 intel_runtime_pm_get(dev_priv);
3709
3710 i915_handle_error(dev, val,
3711 "Manually setting wedged to %llu", val);
3712
3713 intel_runtime_pm_put(dev_priv);
3714
3715 return 0;
3716 }
3717
3718 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3719 i915_wedged_get, i915_wedged_set,
3720 "%llu\n");
3721
3722 static int
3723 i915_ring_stop_get(void *data, u64 *val)
3724 {
3725 struct drm_device *dev = data;
3726 struct drm_i915_private *dev_priv = dev->dev_private;
3727
3728 *val = dev_priv->gpu_error.stop_rings;
3729
3730 return 0;
3731 }
3732
3733 static int
3734 i915_ring_stop_set(void *data, u64 val)
3735 {
3736 struct drm_device *dev = data;
3737 struct drm_i915_private *dev_priv = dev->dev_private;
3738 int ret;
3739
3740 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
3741
3742 ret = mutex_lock_interruptible(&dev->struct_mutex);
3743 if (ret)
3744 return ret;
3745
3746 dev_priv->gpu_error.stop_rings = val;
3747 mutex_unlock(&dev->struct_mutex);
3748
3749 return 0;
3750 }
3751
3752 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
3753 i915_ring_stop_get, i915_ring_stop_set,
3754 "0x%08llx\n");
3755
3756 static int
3757 i915_ring_missed_irq_get(void *data, u64 *val)
3758 {
3759 struct drm_device *dev = data;
3760 struct drm_i915_private *dev_priv = dev->dev_private;
3761
3762 *val = dev_priv->gpu_error.missed_irq_rings;
3763 return 0;
3764 }
3765
3766 static int
3767 i915_ring_missed_irq_set(void *data, u64 val)
3768 {
3769 struct drm_device *dev = data;
3770 struct drm_i915_private *dev_priv = dev->dev_private;
3771 int ret;
3772
3773 /* Lock against concurrent debugfs callers */
3774 ret = mutex_lock_interruptible(&dev->struct_mutex);
3775 if (ret)
3776 return ret;
3777 dev_priv->gpu_error.missed_irq_rings = val;
3778 mutex_unlock(&dev->struct_mutex);
3779
3780 return 0;
3781 }
3782
3783 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
3784 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
3785 "0x%08llx\n");
3786
3787 static int
3788 i915_ring_test_irq_get(void *data, u64 *val)
3789 {
3790 struct drm_device *dev = data;
3791 struct drm_i915_private *dev_priv = dev->dev_private;
3792
3793 *val = dev_priv->gpu_error.test_irq_rings;
3794
3795 return 0;
3796 }
3797
3798 static int
3799 i915_ring_test_irq_set(void *data, u64 val)
3800 {
3801 struct drm_device *dev = data;
3802 struct drm_i915_private *dev_priv = dev->dev_private;
3803 int ret;
3804
3805 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
3806
3807 /* Lock against concurrent debugfs callers */
3808 ret = mutex_lock_interruptible(&dev->struct_mutex);
3809 if (ret)
3810 return ret;
3811
3812 dev_priv->gpu_error.test_irq_rings = val;
3813 mutex_unlock(&dev->struct_mutex);
3814
3815 return 0;
3816 }
3817
3818 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
3819 i915_ring_test_irq_get, i915_ring_test_irq_set,
3820 "0x%08llx\n");
3821
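/*
 * i915_gem_drop_caches: writing a combination of the DROP_* flags below lets
 * userspace force the driver to idle the GPU (DROP_ACTIVE), retire requests
 * (DROP_RETIRE) and shrink bound/unbound objects (DROP_BOUND/DROP_UNBOUND).
 *
 * Usage sketch, dropping everything (DROP_ALL == 0xf, same debugfs path
 * assumptions as above):
 *
 *	echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */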
3822 #define DROP_UNBOUND 0x1
3823 #define DROP_BOUND 0x2
3824 #define DROP_RETIRE 0x4
3825 #define DROP_ACTIVE 0x8
3826 #define DROP_ALL (DROP_UNBOUND | \
3827 DROP_BOUND | \
3828 DROP_RETIRE | \
3829 DROP_ACTIVE)
3830 static int
3831 i915_drop_caches_get(void *data, u64 *val)
3832 {
3833 *val = DROP_ALL;
3834
3835 return 0;
3836 }
3837
3838 static int
3839 i915_drop_caches_set(void *data, u64 val)
3840 {
3841 struct drm_device *dev = data;
3842 struct drm_i915_private *dev_priv = dev->dev_private;
3843 int ret;
3844
3845 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
3846
3847 /* No need to check and wait for gpu resets; only libdrm auto-restarts
3848 * ioctls on -EAGAIN. */
3849 ret = mutex_lock_interruptible(&dev->struct_mutex);
3850 if (ret)
3851 return ret;
3852
3853 if (val & DROP_ACTIVE) {
3854 ret = i915_gpu_idle(dev);
3855 if (ret)
3856 goto unlock;
3857 }
3858
3859 if (val & (DROP_RETIRE | DROP_ACTIVE))
3860 i915_gem_retire_requests(dev);
3861
3862 if (val & DROP_BOUND)
3863 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
3864
3865 if (val & DROP_UNBOUND)
3866 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
3867
3868 unlock:
3869 mutex_unlock(&dev->struct_mutex);
3870
3871 return ret;
3872 }
3873
3874 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3875 i915_drop_caches_get, i915_drop_caches_set,
3876 "0x%08llx\n");
3877
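/*
 * i915_max_freq: reads and writes the RPS maximum softlimit in MHz; the raw
 * limit is scaled by GT_FREQUENCY_MULTIPLIER (or converted via vlv_gpu_freq()
 * on Valleyview). Writing caps turbo without disabling it.
 *
 * Usage sketch (900 is a hypothetical value, same debugfs path assumptions as
 * above):
 *
 *	echo 900 > /sys/kernel/debug/dri/0/i915_max_freq
 */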
3878 static int
3879 i915_max_freq_get(void *data, u64 *val)
3880 {
3881 struct drm_device *dev = data;
3882 struct drm_i915_private *dev_priv = dev->dev_private;
3883 int ret;
3884
3885 if (INTEL_INFO(dev)->gen < 6)
3886 return -ENODEV;
3887
3888 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3889
3890 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3891 if (ret)
3892 return ret;
3893
3894 if (IS_VALLEYVIEW(dev))
3895 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
3896 else
3897 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3898 mutex_unlock(&dev_priv->rps.hw_lock);
3899
3900 return 0;
3901 }
3902
3903 static int
3904 i915_max_freq_set(void *data, u64 val)
3905 {
3906 struct drm_device *dev = data;
3907 struct drm_i915_private *dev_priv = dev->dev_private;
3908 u32 rp_state_cap, hw_max, hw_min;
3909 int ret;
3910
3911 if (INTEL_INFO(dev)->gen < 6)
3912 return -ENODEV;
3913
3914 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3915
3916 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
3917
3918 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3919 if (ret)
3920 return ret;
3921
3922 /*
3923 * Turbo will still be enabled, but won't go above the set value.
3924 */
3925 if (IS_VALLEYVIEW(dev)) {
3926 val = vlv_freq_opcode(dev_priv, val);
3927
3928 hw_max = dev_priv->rps.max_freq;
3929 hw_min = dev_priv->rps.min_freq;
3930 } else {
3931 do_div(val, GT_FREQUENCY_MULTIPLIER);
3932
3933 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3934 hw_max = dev_priv->rps.max_freq;
3935 hw_min = (rp_state_cap >> 16) & 0xff;
3936 }
3937
3938 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
3939 mutex_unlock(&dev_priv->rps.hw_lock);
3940 return -EINVAL;
3941 }
3942
3943 dev_priv->rps.max_freq_softlimit = val;
3944
3945 if (IS_VALLEYVIEW(dev))
3946 valleyview_set_rps(dev, val);
3947 else
3948 gen6_set_rps(dev, val);
3949
3950 mutex_unlock(&dev_priv->rps.hw_lock);
3951
3952 return 0;
3953 }
3954
3955 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
3956 i915_max_freq_get, i915_max_freq_set,
3957 "%llu\n");
3958
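/*
 * i915_min_freq: counterpart of i915_max_freq above; it adjusts the RPS
 * minimum softlimit (again in MHz) instead of the cap, with the same unit
 * conversion and bounds checks.
 */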
3959 static int
3960 i915_min_freq_get(void *data, u64 *val)
3961 {
3962 struct drm_device *dev = data;
3963 struct drm_i915_private *dev_priv = dev->dev_private;
3964 int ret;
3965
3966 if (INTEL_INFO(dev)->gen < 6)
3967 return -ENODEV;
3968
3969 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3970
3971 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3972 if (ret)
3973 return ret;
3974
3975 if (IS_VALLEYVIEW(dev))
3976 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
3977 else
3978 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3979 mutex_unlock(&dev_priv->rps.hw_lock);
3980
3981 return 0;
3982 }
3983
3984 static int
3985 i915_min_freq_set(void *data, u64 val)
3986 {
3987 struct drm_device *dev = data;
3988 struct drm_i915_private *dev_priv = dev->dev_private;
3989 u32 rp_state_cap, hw_max, hw_min;
3990 int ret;
3991
3992 if (INTEL_INFO(dev)->gen < 6)
3993 return -ENODEV;
3994
3995 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3996
3997 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
3998
3999 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4000 if (ret)
4001 return ret;
4002
4003 /*
4004 * Turbo will still be enabled, but won't go below the set value.
4005 */
4006 if (IS_VALLEYVIEW(dev)) {
4007 val = vlv_freq_opcode(dev_priv, val);
4008
4009 hw_max = dev_priv->rps.max_freq;
4010 hw_min = dev_priv->rps.min_freq;
4011 } else {
4012 do_div(val, GT_FREQUENCY_MULTIPLIER);
4013
4014 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4015 hw_max = dev_priv->rps.max_freq;
4016 hw_min = (rp_state_cap >> 16) & 0xff;
4017 }
4018
4019 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
4020 mutex_unlock(&dev_priv->rps.hw_lock);
4021 return -EINVAL;
4022 }
4023
4024 dev_priv->rps.min_freq_softlimit = val;
4025
4026 if (IS_VALLEYVIEW(dev))
4027 valleyview_set_rps(dev, val);
4028 else
4029 gen6_set_rps(dev, val);
4030
4031 mutex_unlock(&dev_priv->rps.hw_lock);
4032
4033 return 0;
4034 }
4035
4036 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4037 i915_min_freq_get, i915_min_freq_set,
4038 "%llu\n");
4039
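/*
 * i915_cache_sharing: exposes the snoop/cache sharing field of
 * GEN6_MBCUNIT_SNPCR on gen6/gen7; only values 0-3 are accepted.
 *
 * Usage sketch (same debugfs path assumptions as above):
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */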
4040 static int
4041 i915_cache_sharing_get(void *data, u64 *val)
4042 {
4043 struct drm_device *dev = data;
4044 struct drm_i915_private *dev_priv = dev->dev_private;
4045 u32 snpcr;
4046 int ret;
4047
4048 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4049 return -ENODEV;
4050
4051 ret = mutex_lock_interruptible(&dev->struct_mutex);
4052 if (ret)
4053 return ret;
4054 intel_runtime_pm_get(dev_priv);
4055
4056 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4057
4058 intel_runtime_pm_put(dev_priv);
4059 mutex_unlock(&dev->struct_mutex);
4060
4061 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4062
4063 return 0;
4064 }
4065
4066 static int
4067 i915_cache_sharing_set(void *data, u64 val)
4068 {
4069 struct drm_device *dev = data;
4070 struct drm_i915_private *dev_priv = dev->dev_private;
4071 u32 snpcr;
4072
4073 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4074 return -ENODEV;
4075
4076 if (val > 3)
4077 return -EINVAL;
4078
4079 intel_runtime_pm_get(dev_priv);
4080 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4081
4082 /* Update the cache sharing policy here as well */
4083 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4084 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4085 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4086 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4087
4088 intel_runtime_pm_put(dev_priv);
4089 return 0;
4090 }
4091
4092 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4093 i915_cache_sharing_get, i915_cache_sharing_set,
4094 "%llu\n");
4095
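/*
 * i915_forcewake_user: holding this file open keeps forcewake asserted on all
 * domains (gen6+), so the GT stays awake, e.g. while registers are inspected;
 * the reference is dropped again when the file is closed.
 *
 * Usage sketch from a shell (same debugfs path assumptions as above):
 *
 *	exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *	# ... poke registers ...
 *	exec 3<&-
 */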
4096 static int i915_forcewake_open(struct inode *inode, struct file *file)
4097 {
4098 struct drm_device *dev = inode->i_private;
4099 struct drm_i915_private *dev_priv = dev->dev_private;
4100
4101 if (INTEL_INFO(dev)->gen < 6)
4102 return 0;
4103
4104 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4105
4106 return 0;
4107 }
4108
4109 static int i915_forcewake_release(struct inode *inode, struct file *file)
4110 {
4111 struct drm_device *dev = inode->i_private;
4112 struct drm_i915_private *dev_priv = dev->dev_private;
4113
4114 if (INTEL_INFO(dev)->gen < 6)
4115 return 0;
4116
4117 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4118
4119 return 0;
4120 }
4121
4122 static const struct file_operations i915_forcewake_fops = {
4123 .owner = THIS_MODULE,
4124 .open = i915_forcewake_open,
4125 .release = i915_forcewake_release,
4126 };
4127
4128 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
4129 {
4130 struct drm_device *dev = minor->dev;
4131 struct dentry *ent;
4132
4133 ent = debugfs_create_file("i915_forcewake_user",
4134 S_IRUSR,
4135 root, dev,
4136 &i915_forcewake_fops);
4137 if (!ent)
4138 return -ENOMEM;
4139
4140 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
4141 }
4142
4143 static int i915_debugfs_create(struct dentry *root,
4144 struct drm_minor *minor,
4145 const char *name,
4146 const struct file_operations *fops)
4147 {
4148 struct drm_device *dev = minor->dev;
4149 struct dentry *ent;
4150
4151 ent = debugfs_create_file(name,
4152 S_IRUGO | S_IWUSR,
4153 root, dev,
4154 fops);
4155 if (!ent)
4156 return -ENOMEM;
4157
4158 return drm_add_fake_info_node(minor, ent, fops);
4159 }
4160
4161 static const struct drm_info_list i915_debugfs_list[] = {
4162 {"i915_capabilities", i915_capabilities, 0},
4163 {"i915_gem_objects", i915_gem_object_info, 0},
4164 {"i915_gem_gtt", i915_gem_gtt_info, 0},
4165 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
4166 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
4167 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
4168 {"i915_gem_stolen", i915_gem_stolen_list_info },
4169 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
4170 {"i915_gem_request", i915_gem_request_info, 0},
4171 {"i915_gem_seqno", i915_gem_seqno_info, 0},
4172 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4173 {"i915_gem_interrupt", i915_interrupt_info, 0},
4174 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
4175 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
4176 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
4177 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
4178 {"i915_frequency_info", i915_frequency_info, 0},
4179 {"i915_drpc_info", i915_drpc_info, 0},
4180 {"i915_emon_status", i915_emon_status, 0},
4181 {"i915_ring_freq_table", i915_ring_freq_table, 0},
4182 {"i915_fbc_status", i915_fbc_status, 0},
4183 {"i915_ips_status", i915_ips_status, 0},
4184 {"i915_sr_status", i915_sr_status, 0},
4185 {"i915_opregion", i915_opregion, 0},
4186 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4187 {"i915_context_status", i915_context_status, 0},
4188 {"i915_dump_lrc", i915_dump_lrc, 0},
4189 {"i915_execlists", i915_execlists, 0},
4190 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
4191 {"i915_swizzle_info", i915_swizzle_info, 0},
4192 {"i915_ppgtt_info", i915_ppgtt_info, 0},
4193 {"i915_llc", i915_llc, 0},
4194 {"i915_edp_psr_status", i915_edp_psr_status, 0},
4195 {"i915_sink_crc_eDP1", i915_sink_crc, 0},
4196 {"i915_energy_uJ", i915_energy_uJ, 0},
4197 {"i915_pc8_status", i915_pc8_status, 0},
4198 {"i915_power_domain_info", i915_power_domain_info, 0},
4199 {"i915_display_info", i915_display_info, 0},
4200 {"i915_semaphore_status", i915_semaphore_status, 0},
4201 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4202 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4203 {"i915_wa_registers", i915_wa_registers, 0},
4204 };
4205 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4206
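/*
 * Unlike the read-only seq_file entries in i915_debugfs_list above, the files
 * in this table carry their own file_operations and are created writable
 * (S_IRUGO | S_IWUSR) via i915_debugfs_create().
 */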
4207 static const struct i915_debugfs_files {
4208 const char *name;
4209 const struct file_operations *fops;
4210 } i915_debugfs_files[] = {
4211 {"i915_wedged", &i915_wedged_fops},
4212 {"i915_max_freq", &i915_max_freq_fops},
4213 {"i915_min_freq", &i915_min_freq_fops},
4214 {"i915_cache_sharing", &i915_cache_sharing_fops},
4215 {"i915_ring_stop", &i915_ring_stop_fops},
4216 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4217 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
4218 {"i915_gem_drop_caches", &i915_drop_caches_fops},
4219 {"i915_error_state", &i915_error_state_fops},
4220 {"i915_next_seqno", &i915_next_seqno_fops},
4221 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4222 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4223 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4224 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4225 {"i915_fbc_false_color", &i915_fbc_fc_fops},
4226 };
4227
4228 void intel_display_crc_init(struct drm_device *dev)
4229 {
4230 struct drm_i915_private *dev_priv = dev->dev_private;
4231 enum pipe pipe;
4232
4233 for_each_pipe(dev_priv, pipe) {
4234 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
4235
4236 pipe_crc->opened = false;
4237 spin_lock_init(&pipe_crc->lock);
4238 init_waitqueue_head(&pipe_crc->wq);
4239 }
4240 }
4241
4242 int i915_debugfs_init(struct drm_minor *minor)
4243 {
4244 int ret, i;
4245
4246 ret = i915_forcewake_create(minor->debugfs_root, minor);
4247 if (ret)
4248 return ret;
4249
4250 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4251 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
4252 if (ret)
4253 return ret;
4254 }
4255
4256 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4257 ret = i915_debugfs_create(minor->debugfs_root, minor,
4258 i915_debugfs_files[i].name,
4259 i915_debugfs_files[i].fops);
4260 if (ret)
4261 return ret;
4262 }
4263
4264 return drm_debugfs_create_files(i915_debugfs_list,
4265 I915_DEBUGFS_ENTRIES,
4266 minor->debugfs_root, minor);
4267 }
4268
4269 void i915_debugfs_cleanup(struct drm_minor *minor)
4270 {
4271 int i;
4272
4273 drm_debugfs_remove_files(i915_debugfs_list,
4274 I915_DEBUGFS_ENTRIES, minor);
4275
4276 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
4277 1, minor);
4278
4279 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4280 struct drm_info_list *info_list =
4281 (struct drm_info_list *)&i915_pipe_crc_data[i];
4282
4283 drm_debugfs_remove_files(info_list, 1, minor);
4284 }
4285
4286 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4287 struct drm_info_list *info_list =
4288 (struct drm_info_list *) i915_debugfs_files[i].fops;
4289
4290 drm_debugfs_remove_files(info_list, 1, minor);
4291 }
4292 }