drm/i915: Implement blocking read for pipe CRC files
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
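
/*
 * Illustrative only (made-up values, not captured from real hardware): a
 * describe_obj() line follows the format string above -- pointer, pin/
 * tiling/global flags, size, read/write domains, last read/write/fence
 * seqnos, cache level and the optional trailing decorations -- e.g.
 *
 *	ffff8800a9e3b800: p X g     4096KiB 02 02 10 10 9 uncached dirty (pinned x 1) (render ring)
 */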

static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
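
/*
 * Note: count_objects() (and count_vmas() below) expand in the caller's
 * scope and rely on local variables named size, count, mappable_size,
 * mappable_count and obj (resp. vma) existing there. The expected calling
 * pattern, as used by i915_gem_object_info() below:
 *
 *	size = count = mappable_size = mappable_count = 0;
 *	count_objects(&dev_priv->mm.bound_list, global_list);
 */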

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}


static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
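
/*
 * Usage sketch (assumes debugfs mounted at /sys/kernel/debug and DRM
 * minor 0): reading i915_error_state dumps the last captured GPU error
 * state, and any write to the file clears the capture, e.g.
 *
 *	# cat /sys/kernel/debug/dri/0/i915_error_state > error.txt
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_error_state
 */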

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
850 static int i915_rstdby_delays(struct seq_file *m, void *unused)
851 {
852 struct drm_info_node *node = (struct drm_info_node *) m->private;
853 struct drm_device *dev = node->minor->dev;
854 drm_i915_private_t *dev_priv = dev->dev_private;
855 u16 crstanddelay;
856 int ret;
857
858 ret = mutex_lock_interruptible(&dev->struct_mutex);
859 if (ret)
860 return ret;
861
862 crstanddelay = I915_READ16(CRSTANDVID);
863
864 mutex_unlock(&dev->struct_mutex);
865
866 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
867
868 return 0;
869 }
870
871 static int i915_cur_delayinfo(struct seq_file *m, void *unused)
872 {
873 struct drm_info_node *node = (struct drm_info_node *) m->private;
874 struct drm_device *dev = node->minor->dev;
875 drm_i915_private_t *dev_priv = dev->dev_private;
876 int ret;
877
878 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
879
880 if (IS_GEN5(dev)) {
881 u16 rgvswctl = I915_READ16(MEMSWCTL);
882 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
883
884 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
885 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
886 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
887 MEMSTAT_VID_SHIFT);
888 seq_printf(m, "Current P-state: %d\n",
889 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
890 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
891 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
892 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
893 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
894 u32 rpstat, cagf, reqf;
895 u32 rpupei, rpcurup, rpprevup;
896 u32 rpdownei, rpcurdown, rpprevdown;
897 int max_freq;
898
899 /* RPSTAT1 is in the GT power well */
900 ret = mutex_lock_interruptible(&dev->struct_mutex);
901 if (ret)
902 return ret;
903
904 gen6_gt_force_wake_get(dev_priv);
905
906 reqf = I915_READ(GEN6_RPNSWREQ);
907 reqf &= ~GEN6_TURBO_DISABLE;
908 if (IS_HASWELL(dev))
909 reqf >>= 24;
910 else
911 reqf >>= 25;
912 reqf *= GT_FREQUENCY_MULTIPLIER;
913
914 rpstat = I915_READ(GEN6_RPSTAT1);
915 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
916 rpcurup = I915_READ(GEN6_RP_CUR_UP);
917 rpprevup = I915_READ(GEN6_RP_PREV_UP);
918 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
919 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
920 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
921 if (IS_HASWELL(dev))
922 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
923 else
924 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
925 cagf *= GT_FREQUENCY_MULTIPLIER;
926
927 gen6_gt_force_wake_put(dev_priv);
928 mutex_unlock(&dev->struct_mutex);
929
930 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
931 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
932 seq_printf(m, "Render p-state ratio: %d\n",
933 (gt_perf_status & 0xff00) >> 8);
934 seq_printf(m, "Render p-state VID: %d\n",
935 gt_perf_status & 0xff);
936 seq_printf(m, "Render p-state limit: %d\n",
937 rp_state_limits & 0xff);
938 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
939 seq_printf(m, "CAGF: %dMHz\n", cagf);
940 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
941 GEN6_CURICONT_MASK);
942 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
943 GEN6_CURBSYTAVG_MASK);
944 seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
945 GEN6_CURBSYTAVG_MASK);
946 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
947 GEN6_CURIAVG_MASK);
948 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
949 GEN6_CURBSYTAVG_MASK);
950 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
951 GEN6_CURBSYTAVG_MASK);
952
953 max_freq = (rp_state_cap & 0xff0000) >> 16;
954 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
955 max_freq * GT_FREQUENCY_MULTIPLIER);
956
957 max_freq = (rp_state_cap & 0xff00) >> 8;
958 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
959 max_freq * GT_FREQUENCY_MULTIPLIER);
960
961 max_freq = rp_state_cap & 0xff;
962 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
963 max_freq * GT_FREQUENCY_MULTIPLIER);
964
965 seq_printf(m, "Max overclocked frequency: %dMHz\n",
966 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
967 } else if (IS_VALLEYVIEW(dev)) {
968 u32 freq_sts, val;
969
970 mutex_lock(&dev_priv->rps.hw_lock);
971 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
972 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
973 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
974
975 val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
976 seq_printf(m, "max GPU freq: %d MHz\n",
977 vlv_gpu_freq(dev_priv->mem_freq, val));
978
979 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
980 seq_printf(m, "min GPU freq: %d MHz\n",
981 vlv_gpu_freq(dev_priv->mem_freq, val));
982
983 seq_printf(m, "current GPU freq: %d MHz\n",
984 vlv_gpu_freq(dev_priv->mem_freq,
985 (freq_sts >> 8) & 0xff));
986 mutex_unlock(&dev_priv->rps.hw_lock);
987 } else {
988 seq_puts(m, "no P-state info available\n");
989 }
990
991 return 0;
992 }
993
994 static int i915_delayfreq_table(struct seq_file *m, void *unused)
995 {
996 struct drm_info_node *node = (struct drm_info_node *) m->private;
997 struct drm_device *dev = node->minor->dev;
998 drm_i915_private_t *dev_priv = dev->dev_private;
999 u32 delayfreq;
1000 int ret, i;
1001
1002 ret = mutex_lock_interruptible(&dev->struct_mutex);
1003 if (ret)
1004 return ret;
1005
1006 for (i = 0; i < 16; i++) {
1007 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
1008 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
1009 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
1010 }
1011
1012 mutex_unlock(&dev->struct_mutex);
1013
1014 return 0;
1015 }
1016
1017 static inline int MAP_TO_MV(int map)
1018 {
1019 return 1250 - (map * 25);
1020 }
1021
1022 static int i915_inttoext_table(struct seq_file *m, void *unused)
1023 {
1024 struct drm_info_node *node = (struct drm_info_node *) m->private;
1025 struct drm_device *dev = node->minor->dev;
1026 drm_i915_private_t *dev_priv = dev->dev_private;
1027 u32 inttoext;
1028 int ret, i;
1029
1030 ret = mutex_lock_interruptible(&dev->struct_mutex);
1031 if (ret)
1032 return ret;
1033
1034 for (i = 1; i <= 32; i++) {
1035 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
1036 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
1037 }
1038
1039 mutex_unlock(&dev->struct_mutex);
1040
1041 return 0;
1042 }
1043
1044 static int ironlake_drpc_info(struct seq_file *m)
1045 {
1046 struct drm_info_node *node = (struct drm_info_node *) m->private;
1047 struct drm_device *dev = node->minor->dev;
1048 drm_i915_private_t *dev_priv = dev->dev_private;
1049 u32 rgvmodectl, rstdbyctl;
1050 u16 crstandvid;
1051 int ret;
1052
1053 ret = mutex_lock_interruptible(&dev->struct_mutex);
1054 if (ret)
1055 return ret;
1056
1057 rgvmodectl = I915_READ(MEMMODECTL);
1058 rstdbyctl = I915_READ(RSTDBYCTL);
1059 crstandvid = I915_READ16(CRSTANDVID);
1060
1061 mutex_unlock(&dev->struct_mutex);
1062
1063 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1064 "yes" : "no");
1065 seq_printf(m, "Boost freq: %d\n",
1066 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1067 MEMMODE_BOOST_FREQ_SHIFT);
1068 seq_printf(m, "HW control enabled: %s\n",
1069 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1070 seq_printf(m, "SW control enabled: %s\n",
1071 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1072 seq_printf(m, "Gated voltage change: %s\n",
1073 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1074 seq_printf(m, "Starting frequency: P%d\n",
1075 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1076 seq_printf(m, "Max P-state: P%d\n",
1077 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1078 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1079 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1080 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1081 seq_printf(m, "Render standby enabled: %s\n",
1082 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1083 seq_puts(m, "Current RS state: ");
1084 switch (rstdbyctl & RSX_STATUS_MASK) {
1085 case RSX_STATUS_ON:
1086 seq_puts(m, "on\n");
1087 break;
1088 case RSX_STATUS_RC1:
1089 seq_puts(m, "RC1\n");
1090 break;
1091 case RSX_STATUS_RC1E:
1092 seq_puts(m, "RC1E\n");
1093 break;
1094 case RSX_STATUS_RS1:
1095 seq_puts(m, "RS1\n");
1096 break;
1097 case RSX_STATUS_RS2:
1098 seq_puts(m, "RS2 (RC6)\n");
1099 break;
1100 case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int gen6_drpc_info(struct seq_file *m)
{

	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	/* ret, not 0: don't report an interrupted lock as success */
	return ret;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		/* ifbdev is NULL when CONFIG_DRM_I915_FBDEV is not set */
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ", ring->name);

		describe_obj(m, ctx->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;


	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;


	if (!IS_VALLEYVIEW(dev)) {
		seq_puts(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	bool enabled = false;

	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));

	enabled = HAS_PSR(dev) &&
		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "Enabled: %s\n", yesno(enabled));

	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);

	return 0;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}

static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	mutex_lock(&dev_priv->pc8.lock);
	seq_printf(m, "Requirements met: %s\n",
		   yesno(dev_priv->pc8.requirements_met));
	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(dev_priv->pc8.irqs_disabled));
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
	mutex_unlock(&dev_priv->pc8.lock);

	return 0;
}

struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	filep->private_data = inode->i_private;

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	return 0;
}

/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
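
/*
 * Worked out: PIPE_CRC_LINE_LEN = 6 * 8 + 5 + 1 = 54 bytes per formatted
 * line (frame counter plus 5 CRC words, each printed in 8 chars, 5
 * separating spaces, one '\n'), so PIPE_CRC_BUFFER_LEN is 55 to leave
 * room for snprintf()'s terminating NUL.
 */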

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	int head, tail;

	head = atomic_read(&pipe_crc->head);
	tail = atomic_read(&pipe_crc->tail);

	return CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR);
}
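
/*
 * Sketch of the circular-buffer arithmetic used above: CIRC_CNT() from
 * <linux/circ_buf.h> computes the number of filled slots for a
 * power-of-2 size, i.e.
 *
 *	CIRC_CNT(head, tail, size) == ((head) - (tail)) & ((size) - 1)
 *
 * The producer (the CRC interrupt code added alongside this patch)
 * advances head as CRCs arrive; the reader below consumes at tail.
 */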

static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int head, tail, n_entries, n;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	while (pipe_crc_data_count(pipe_crc) == 0) {
		if (filep->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(pipe_crc->wq,
					     pipe_crc_data_count(pipe_crc)))
			return -ERESTARTSYS;
	}

	/* We now have one or more entries to read */
	head = atomic_read(&pipe_crc->head);
	tail = atomic_read(&pipe_crc->tail);
	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
			count / PIPE_CRC_LINE_LEN);
	bytes_read = 0;
	n = 0;
	do {
		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
		int ret;

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
				   buf, PIPE_CRC_LINE_LEN);
		/* copy_to_user() returns the number of bytes left uncopied */
		if (ret)
			return -EFAULT;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		atomic_set(&pipe_crc->tail, tail);
		n++;
	} while (--n_entries);

	return bytes_read;
}
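
/*
 * A minimal user-space sketch of the blocking read implemented above
 * (illustrative only, not part of this file; assumes debugfs mounted at
 * /sys/kernel/debug and DRM minor 0). Each read() returns one or more
 * complete "frame crc1..crc5" lines and blocks until a CRC has been
 * captured, unless the file was opened with O_NONBLOCK, in which case it
 * fails with EAGAIN:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char line[55];	// PIPE_CRC_BUFFER_LEN
 *		ssize_t n;
 *		int fd = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc",
 *			      O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, line, sizeof(line) - 1)) > 0) {
 *			line[n] = '\0';
 *			fputs(line, stdout);	// one CRC line per read
 *		}
 *		return close(fd);
 *	}
 */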

static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, info);
}
1896
1897 static const char *pipe_crc_sources[] = {
1898 "none",
1899 "plane1",
1900 "plane2",
1901 "pf",
1902 };
1903
1904 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
1905 {
1906 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
1907 return pipe_crc_sources[source];
1908 }
1909
1910 static int display_crc_ctl_show(struct seq_file *m, void *data)
1911 {
1912 struct drm_device *dev = m->private;
1913 struct drm_i915_private *dev_priv = dev->dev_private;
1914 int i;
1915
1916 for (i = 0; i < I915_MAX_PIPES; i++)
1917 seq_printf(m, "%c %s\n", pipe_name(i),
1918 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
1919
1920 return 0;
1921 }
1922
1923 static int display_crc_ctl_open(struct inode *inode, struct file *file)
1924 {
1925 struct drm_device *dev = inode->i_private;
1926
1927 return single_open(file, display_crc_ctl_show, dev);
1928 }
1929
1930 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
1931 enum intel_pipe_crc_source source)
1932 {
1933 struct drm_i915_private *dev_priv = dev->dev_private;
1934 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1935 u32 val;
1936
1937
1940 if (!IS_IVYBRIDGE(dev))
1941 return -ENODEV;
1942
1943 if (pipe_crc->source == source)
1944 return 0;
1945
1946 /* forbid changing the source without going back to 'none' */
1947 if (pipe_crc->source && source)
1948 return -EINVAL;
1949
1950 /* none -> real source transition */
1951 if (source) {
1952 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
1953 pipe_name(pipe), pipe_crc_source_name(source));
1954
1955 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
1956 INTEL_PIPE_CRC_ENTRIES_NR,
1957 GFP_KERNEL);
1958 if (!pipe_crc->entries)
1959 return -ENOMEM;
1960
1961 atomic_set(&pipe_crc->head, 0);
1962 atomic_set(&pipe_crc->tail, 0);
1963 }
1964
1965 pipe_crc->source = source;
1966
1967 switch (source) {
1968 case INTEL_PIPE_CRC_SOURCE_PLANE1:
1969 val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
1970 break;
1971 case INTEL_PIPE_CRC_SOURCE_PLANE2:
1972 val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
1973 break;
1974 case INTEL_PIPE_CRC_SOURCE_PF:
1975 val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
1976 break;
1977 case INTEL_PIPE_CRC_SOURCE_NONE:
1978 default:
1979 val = 0;
1980 break;
1981 }
1982
1983 I915_WRITE(PIPE_CRC_CTL(pipe), val);
1984 POSTING_READ(PIPE_CRC_CTL(pipe));
1985
1986 /* real source -> none transition */
1987 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
1988 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
1989 pipe_name(pipe));
1990
1991 kfree(pipe_crc->entries);
1992 pipe_crc->entries = NULL;
1993 }
1994
1995 return 0;
1996 }
1997
1998 /*
1999 * Parse pipe CRC command strings:
2000 * command: wsp* object wsp+ name wsp+ source wsp*
2001 * object: 'pipe'
2002 * name: (A | B | C)
2003 * source: (none | plane1 | plane2 | pf)
2004 * wsp: (#0x20 | #0x9 | #0xA)+
2005 *
2006 * eg.:
2007 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
2008 * "pipe A none" -> Stop CRC
2009 */
2010 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
2011 {
2012 int n_words = 0;
2013
2014 while (*buf) {
2015 char *end;
2016
2017 /* skip leading white space */
2018 buf = skip_spaces(buf);
2019 if (!*buf)
2020 break; /* end of buffer */
2021
2022 /* find end of word */
2023 for (end = buf; *end && !isspace(*end); end++)
2024 ;
2025
2026 if (n_words == max_words) {
2027 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
2028 max_words);
2029 return -EINVAL; /* ran out of words[] before bytes */
2030 }
2031
2032 if (*end)
2033 *end++ = '\0';
2034 words[n_words++] = buf;
2035 buf = end;
2036 }
2037
2038 return n_words;
2039 }
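/*
 * A worked example of the tokenizer, on a hypothetical input: the buffer
 * "  pipe A\tplane1\n" is split in place into words[] == { "pipe", "A",
 * "plane1" } and 3 is returned. A fourth word would trip the
 * n_words == max_words check and yield -EINVAL.
 */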
2040
2041 enum intel_pipe_crc_object {
2042 PIPE_CRC_OBJECT_PIPE,
2043 };
2044
2045 static const char *pipe_crc_objects[] = {
2046 "pipe",
2047 };
2048
2049 static int
2050 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
2051 {
2052 int i;
2053
2054 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
2055 if (!strcmp(buf, pipe_crc_objects[i])) {
2056 *o = i;
2057 return 0;
2058 }
2059
2060 return -EINVAL;
2061 }
2062
2063 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
2064 {
2065 const char name = buf[0];
2066
2067 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
2068 return -EINVAL;
2069
2070 *pipe = name - 'A';
2071
2072 return 0;
2073 }
2074
2075 static int
2076 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
2077 {
2078 int i;
2079
2080 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
2081 if (!strcmp(buf, pipe_crc_sources[i])) {
2082 *s = i;
2083 return 0;
2084 }
2085
2086 return -EINVAL;
2087 }
2088
2089 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
2090 {
2091 #define N_WORDS 3
2092 int n_words;
2093 char *words[N_WORDS];
2094 enum pipe pipe;
2095 enum intel_pipe_crc_object object;
2096 enum intel_pipe_crc_source source;
2097
2098 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
2099 if (n_words != N_WORDS) {
2100 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
2101 N_WORDS);
2102 return -EINVAL;
2103 }
2104
2105 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
2106 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
2107 return -EINVAL;
2108 }
2109
2110 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
2111 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
2112 return -EINVAL;
2113 }
2114
2115 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
2116 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
2117 return -EINVAL;
2118 }
2119
2120 return pipe_crc_set_source(dev, pipe, source);
2121 }
2122
2123 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
2124 size_t len, loff_t *offp)
2125 {
2126 struct seq_file *m = file->private_data;
2127 struct drm_device *dev = m->private;
2128 char *tmpbuf;
2129 int ret;
2130
2131 if (len == 0)
2132 return 0;
2133
2134 if (len > PAGE_SIZE - 1) {
2135 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
2136 PAGE_SIZE);
2137 return -E2BIG;
2138 }
2139
2140 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
2141 if (!tmpbuf)
2142 return -ENOMEM;
2143
2144 if (copy_from_user(tmpbuf, ubuf, len)) {
2145 ret = -EFAULT;
2146 goto out;
2147 }
2148 tmpbuf[len] = '\0';
2149
2150 ret = display_crc_ctl_parse(dev, tmpbuf, len);
2151
2152 out:
2153 kfree(tmpbuf);
2154 if (ret < 0)
2155 return ret;
2156
2157 *offp += len;
2158 return len;
2159 }
2160
2161 static const struct file_operations i915_display_crc_ctl_fops = {
2162 .owner = THIS_MODULE,
2163 .open = display_crc_ctl_open,
2164 .read = seq_read,
2165 .llseek = seq_lseek,
2166 .release = single_release,
2167 .write = display_crc_ctl_write
2168 };
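/*
 * A sketch of driving the control file from a shell, assuming debugfs is
 * mounted at /sys/kernel/debug and DRM minor 0 (both are assumptions):
 *
 *	# start capturing CRCs of the primary plane on pipe A
 *	echo "pipe A plane1" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *
 *	# list the current source for each pipe
 *	cat /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *
 *	# stop capturing; this also frees the entries buffer
 *	echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *
 * Switching directly between two real sources is rejected with -EINVAL
 * by pipe_crc_set_source(), so a capture has to go through "none" first.
 */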
2169
2170 static int
2171 i915_wedged_get(void *data, u64 *val)
2172 {
2173 struct drm_device *dev = data;
2174 drm_i915_private_t *dev_priv = dev->dev_private;
2175
2176 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
2177
2178 return 0;
2179 }
2180
2181 static int
2182 i915_wedged_set(void *data, u64 val)
2183 {
2184 struct drm_device *dev = data;
2185
2186 DRM_INFO("Manually setting wedged to %llu\n", val);
2187 i915_handle_error(dev, val);
2188
2189 return 0;
2190 }
2191
2192 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
2193 i915_wedged_get, i915_wedged_set,
2194 "%llu\n");
2195
2196 static int
2197 i915_ring_stop_get(void *data, u64 *val)
2198 {
2199 struct drm_device *dev = data;
2200 drm_i915_private_t *dev_priv = dev->dev_private;
2201
2202 *val = dev_priv->gpu_error.stop_rings;
2203
2204 return 0;
2205 }
2206
2207 static int
2208 i915_ring_stop_set(void *data, u64 val)
2209 {
2210 struct drm_device *dev = data;
2211 struct drm_i915_private *dev_priv = dev->dev_private;
2212 int ret;
2213
2214 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
2215
2216 ret = mutex_lock_interruptible(&dev->struct_mutex);
2217 if (ret)
2218 return ret;
2219
2220 dev_priv->gpu_error.stop_rings = val;
2221 mutex_unlock(&dev->struct_mutex);
2222
2223 return 0;
2224 }
2225
2226 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
2227 i915_ring_stop_get, i915_ring_stop_set,
2228 "0x%08llx\n");
2229
2230 static int
2231 i915_ring_missed_irq_get(void *data, u64 *val)
2232 {
2233 struct drm_device *dev = data;
2234 struct drm_i915_private *dev_priv = dev->dev_private;
2235
2236 *val = dev_priv->gpu_error.missed_irq_rings;
2237 return 0;
2238 }
2239
2240 static int
2241 i915_ring_missed_irq_set(void *data, u64 val)
2242 {
2243 struct drm_device *dev = data;
2244 struct drm_i915_private *dev_priv = dev->dev_private;
2245 int ret;
2246
2247 /* Lock against concurrent debugfs callers */
2248 ret = mutex_lock_interruptible(&dev->struct_mutex);
2249 if (ret)
2250 return ret;
2251 dev_priv->gpu_error.missed_irq_rings = val;
2252 mutex_unlock(&dev->struct_mutex);
2253
2254 return 0;
2255 }
2256
2257 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
2258 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
2259 "0x%08llx\n");
2260
2261 static int
2262 i915_ring_test_irq_get(void *data, u64 *val)
2263 {
2264 struct drm_device *dev = data;
2265 struct drm_i915_private *dev_priv = dev->dev_private;
2266
2267 *val = dev_priv->gpu_error.test_irq_rings;
2268
2269 return 0;
2270 }
2271
2272 static int
2273 i915_ring_test_irq_set(void *data, u64 val)
2274 {
2275 struct drm_device *dev = data;
2276 struct drm_i915_private *dev_priv = dev->dev_private;
2277 int ret;
2278
2279 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
2280
2281 /* Lock against concurrent debugfs callers */
2282 ret = mutex_lock_interruptible(&dev->struct_mutex);
2283 if (ret)
2284 return ret;
2285
2286 dev_priv->gpu_error.test_irq_rings = val;
2287 mutex_unlock(&dev->struct_mutex);
2288
2289 return 0;
2290 }
2291
2292 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
2293 i915_ring_test_irq_get, i915_ring_test_irq_set,
2294 "0x%08llx\n");
2295
2296 #define DROP_UNBOUND 0x1
2297 #define DROP_BOUND 0x2
2298 #define DROP_RETIRE 0x4
2299 #define DROP_ACTIVE 0x8
2300 #define DROP_ALL (DROP_UNBOUND | \
2301 DROP_BOUND | \
2302 DROP_RETIRE | \
2303 DROP_ACTIVE)
2304 static int
2305 i915_drop_caches_get(void *data, u64 *val)
2306 {
2307 *val = DROP_ALL;
2308
2309 return 0;
2310 }
2311
2312 static int
2313 i915_drop_caches_set(void *data, u64 val)
2314 {
2315 struct drm_device *dev = data;
2316 struct drm_i915_private *dev_priv = dev->dev_private;
2317 struct drm_i915_gem_object *obj, *next;
2318 struct i915_address_space *vm;
2319 struct i915_vma *vma, *x;
2320 int ret;
2321
2322 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
2323
2324 /* No need to check and wait for gpu resets, only libdrm auto-restarts
2325 * on ioctls on -EAGAIN. */
2326 ret = mutex_lock_interruptible(&dev->struct_mutex);
2327 if (ret)
2328 return ret;
2329
2330 if (val & DROP_ACTIVE) {
2331 ret = i915_gpu_idle(dev);
2332 if (ret)
2333 goto unlock;
2334 }
2335
2336 if (val & (DROP_RETIRE | DROP_ACTIVE))
2337 i915_gem_retire_requests(dev);
2338
2339 if (val & DROP_BOUND) {
2340 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2341 list_for_each_entry_safe(vma, x, &vm->inactive_list,
2342 mm_list) {
2343 if (vma->obj->pin_count)
2344 continue;
2345
2346 ret = i915_vma_unbind(vma);
2347 if (ret)
2348 goto unlock;
2349 }
2350 }
2351 }
2352
2353 if (val & DROP_UNBOUND) {
2354 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
2355 global_list)
2356 if (obj->pages_pin_count == 0) {
2357 ret = i915_gem_object_put_pages(obj);
2358 if (ret)
2359 goto unlock;
2360 }
2361 }
2362
2363 unlock:
2364 mutex_unlock(&dev->struct_mutex);
2365
2366 return ret;
2367 }
2368
2369 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
2370 i915_drop_caches_get, i915_drop_caches_set,
2371 "0x%08llx\n");
2372
2373 static int
2374 i915_max_freq_get(void *data, u64 *val)
2375 {
2376 struct drm_device *dev = data;
2377 drm_i915_private_t *dev_priv = dev->dev_private;
2378 int ret;
2379
2380 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2381 return -ENODEV;
2382
2383 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2384
2385 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2386 if (ret)
2387 return ret;
2388
2389 if (IS_VALLEYVIEW(dev))
2390 *val = vlv_gpu_freq(dev_priv->mem_freq,
2391 dev_priv->rps.max_delay);
2392 else
2393 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
2394 mutex_unlock(&dev_priv->rps.hw_lock);
2395
2396 return 0;
2397 }
2398
2399 static int
2400 i915_max_freq_set(void *data, u64 val)
2401 {
2402 struct drm_device *dev = data;
2403 struct drm_i915_private *dev_priv = dev->dev_private;
2404 int ret;
2405
2406 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2407 return -ENODEV;
2408
2409 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2410
2411 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
2412
2413 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2414 if (ret)
2415 return ret;
2416
2417 /*
2418 * Turbo will still be enabled, but won't go above the set value.
2419 */
2420 if (IS_VALLEYVIEW(dev)) {
2421 val = vlv_freq_opcode(dev_priv->mem_freq, val);
2422 dev_priv->rps.max_delay = val;
2423 valleyview_set_rps(dev, val);
2424 } else {
2425 do_div(val, GT_FREQUENCY_MULTIPLIER);
2426 dev_priv->rps.max_delay = val;
2427 gen6_set_rps(dev, val);
2428 }
2429
2430 mutex_unlock(&dev_priv->rps.hw_lock);
2431
2432 return 0;
2433 }
2434
2435 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
2436 i915_max_freq_get, i915_max_freq_set,
2437 "%llu\n");
2438
2439 static int
2440 i915_min_freq_get(void *data, u64 *val)
2441 {
2442 struct drm_device *dev = data;
2443 drm_i915_private_t *dev_priv = dev->dev_private;
2444 int ret;
2445
2446 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2447 return -ENODEV;
2448
2449 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2450
2451 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2452 if (ret)
2453 return ret;
2454
2455 if (IS_VALLEYVIEW(dev))
2456 *val = vlv_gpu_freq(dev_priv->mem_freq,
2457 dev_priv->rps.min_delay);
2458 else
2459 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
2460 mutex_unlock(&dev_priv->rps.hw_lock);
2461
2462 return 0;
2463 }
2464
2465 static int
2466 i915_min_freq_set(void *data, u64 val)
2467 {
2468 struct drm_device *dev = data;
2469 struct drm_i915_private *dev_priv = dev->dev_private;
2470 int ret;
2471
2472 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2473 return -ENODEV;
2474
2475 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2476
2477 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
2478
2479 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2480 if (ret)
2481 return ret;
2482
2483 /*
2484 * Turbo will still be enabled, but won't go below the set value.
2485 */
2486 if (IS_VALLEYVIEW(dev)) {
2487 val = vlv_freq_opcode(dev_priv->mem_freq, val);
2488 dev_priv->rps.min_delay = val;
2489 valleyview_set_rps(dev, val);
2490 } else {
2491 do_div(val, GT_FREQUENCY_MULTIPLIER);
2492 dev_priv->rps.min_delay = val;
2493 gen6_set_rps(dev, val);
2494 }
2495 mutex_unlock(&dev_priv->rps.hw_lock);
2496
2497 return 0;
2498 }
2499
2500 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
2501 i915_min_freq_get, i915_min_freq_set,
2502 "%llu\n");
2503
2504 static int
2505 i915_cache_sharing_get(void *data, u64 *val)
2506 {
2507 struct drm_device *dev = data;
2508 drm_i915_private_t *dev_priv = dev->dev_private;
2509 u32 snpcr;
2510 int ret;
2511
2512 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2513 return -ENODEV;
2514
2515 ret = mutex_lock_interruptible(&dev->struct_mutex);
2516 if (ret)
2517 return ret;
2518
2519 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2520 mutex_unlock(&dev->struct_mutex);
2521
2522 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
2523
2524 return 0;
2525 }
2526
2527 static int
2528 i915_cache_sharing_set(void *data, u64 val)
2529 {
2530 struct drm_device *dev = data;
2531 struct drm_i915_private *dev_priv = dev->dev_private;
2532 u32 snpcr;
2533
2534 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2535 return -ENODEV;
2536
2537 if (val > 3)
2538 return -EINVAL;
2539
2540 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
2541
2542 /* Update the cache sharing policy here as well */
2543 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2544 snpcr &= ~GEN6_MBC_SNPCR_MASK;
2545 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
2546 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
2547
2548 return 0;
2549 }
2550
2551 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2552 i915_cache_sharing_get, i915_cache_sharing_set,
2553 "%llu\n");
2554
2555 static int i915_forcewake_open(struct inode *inode, struct file *file)
2556 {
2557 struct drm_device *dev = inode->i_private;
2558 struct drm_i915_private *dev_priv = dev->dev_private;
2559
2560 if (INTEL_INFO(dev)->gen < 6)
2561 return 0;
2562
2563 gen6_gt_force_wake_get(dev_priv);
2564
2565 return 0;
2566 }
2567
2568 static int i915_forcewake_release(struct inode *inode, struct file *file)
2569 {
2570 struct drm_device *dev = inode->i_private;
2571 struct drm_i915_private *dev_priv = dev->dev_private;
2572
2573 if (INTEL_INFO(dev)->gen < 6)
2574 return 0;
2575
2576 gen6_gt_force_wake_put(dev_priv);
2577
2578 return 0;
2579 }
2580
2581 static const struct file_operations i915_forcewake_fops = {
2582 .owner = THIS_MODULE,
2583 .open = i915_forcewake_open,
2584 .release = i915_forcewake_release,
2585 };
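/*
 * Forcewake is held for exactly as long as the file stays open, so a
 * shell can keep the GT awake across several register accesses by
 * holding a descriptor (same path assumptions as above):
 *
 *	exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *	# ... read registers while forcewake is held ...
 *	exec 3<&-
 *
 * On pre-GEN6 hardware both open() and release() are no-ops.
 */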
2586
2587 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
2588 {
2589 struct drm_device *dev = minor->dev;
2590 struct dentry *ent;
2591
2592 ent = debugfs_create_file("i915_forcewake_user",
2593 S_IRUSR,
2594 root, dev,
2595 &i915_forcewake_fops);
2596 if (IS_ERR(ent))
2597 return PTR_ERR(ent);
2598
2599 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
2600 }
2601
2602 static int i915_debugfs_create(struct dentry *root,
2603 struct drm_minor *minor,
2604 const char *name,
2605 const struct file_operations *fops)
2606 {
2607 struct drm_device *dev = minor->dev;
2608 struct dentry *ent;
2609
2610 ent = debugfs_create_file(name,
2611 S_IRUGO | S_IWUSR,
2612 root, dev,
2613 fops);
2614 if (IS_ERR(ent))
2615 return PTR_ERR(ent);
2616
2617 return drm_add_fake_info_node(minor, ent, fops);
2618 }
2619
2620 static struct drm_info_list i915_debugfs_list[] = {
2621 {"i915_capabilities", i915_capabilities, 0},
2622 {"i915_gem_objects", i915_gem_object_info, 0},
2623 {"i915_gem_gtt", i915_gem_gtt_info, 0},
2624 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
2625 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
2626 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
2627 {"i915_gem_stolen", i915_gem_stolen_list_info },
2628 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2629 {"i915_gem_request", i915_gem_request_info, 0},
2630 {"i915_gem_seqno", i915_gem_seqno_info, 0},
2631 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2632 {"i915_gem_interrupt", i915_interrupt_info, 0},
2633 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
2634 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
2635 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
2636 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
2637 {"i915_rstdby_delays", i915_rstdby_delays, 0},
2638 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
2639 {"i915_delayfreq_table", i915_delayfreq_table, 0},
2640 {"i915_inttoext_table", i915_inttoext_table, 0},
2641 {"i915_drpc_info", i915_drpc_info, 0},
2642 {"i915_emon_status", i915_emon_status, 0},
2643 {"i915_ring_freq_table", i915_ring_freq_table, 0},
2644 {"i915_gfxec", i915_gfxec, 0},
2645 {"i915_fbc_status", i915_fbc_status, 0},
2646 {"i915_ips_status", i915_ips_status, 0},
2647 {"i915_sr_status", i915_sr_status, 0},
2648 {"i915_opregion", i915_opregion, 0},
2649 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2650 {"i915_context_status", i915_context_status, 0},
2651 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
2652 {"i915_swizzle_info", i915_swizzle_info, 0},
2653 {"i915_ppgtt_info", i915_ppgtt_info, 0},
2654 {"i915_dpio", i915_dpio_info, 0},
2655 {"i915_llc", i915_llc, 0},
2656 {"i915_edp_psr_status", i915_edp_psr_status, 0},
2657 {"i915_energy_uJ", i915_energy_uJ, 0},
2658 {"i915_pc8_status", i915_pc8_status, 0},
2659 };
2660 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2661
2662 static struct i915_debugfs_files {
2663 const char *name;
2664 const struct file_operations *fops;
2665 } i915_debugfs_files[] = {
2666 {"i915_wedged", &i915_wedged_fops},
2667 {"i915_max_freq", &i915_max_freq_fops},
2668 {"i915_min_freq", &i915_min_freq_fops},
2669 {"i915_cache_sharing", &i915_cache_sharing_fops},
2670 {"i915_ring_stop", &i915_ring_stop_fops},
2671 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
2672 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
2673 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2674 {"i915_error_state", &i915_error_state_fops},
2675 {"i915_next_seqno", &i915_next_seqno_fops},
2676 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
2677 };
2678
2679 void intel_display_crc_init(struct drm_device *dev)
2680 {
2681 struct drm_i915_private *dev_priv = dev->dev_private;
2682 int i;
2683
2684 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
2685 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
2686
2687 init_waitqueue_head(&pipe_crc->wq);
2688 }
2689 }
2690
2691 int i915_debugfs_init(struct drm_minor *minor)
2692 {
2693 int ret, i;
2694
2695 ret = i915_forcewake_create(minor->debugfs_root, minor);
2696 if (ret)
2697 return ret;
2698
2699 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
2700 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
2701 if (ret)
2702 return ret;
2703 }
2704
2705 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2706 ret = i915_debugfs_create(minor->debugfs_root, minor,
2707 i915_debugfs_files[i].name,
2708 i915_debugfs_files[i].fops);
2709 if (ret)
2710 return ret;
2711 }
2712
2713 return drm_debugfs_create_files(i915_debugfs_list,
2714 I915_DEBUGFS_ENTRIES,
2715 minor->debugfs_root, minor);
2716 }
2717
2718 void i915_debugfs_cleanup(struct drm_minor *minor)
2719 {
2720 struct drm_device *dev = minor->dev;
2721 int i;
2722
2723 drm_debugfs_remove_files(i915_debugfs_list,
2724 I915_DEBUGFS_ENTRIES, minor);
2725
2726 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
2727 1, minor);
2728
2729 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
2730 struct drm_info_list *info_list =
2731 (struct drm_info_list *)&i915_pipe_crc_data[i];
2732
2733 drm_debugfs_remove_files(info_list, 1, minor);
2734 }
2735
2736 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2737 struct drm_info_list *info_list =
2738 (struct drm_info_list *) i915_debugfs_files[i].fops;
2739
2740 drm_debugfs_remove_files(info_list, 1, minor);
2741 }
2742 }
2743
2744 #endif /* CONFIG_DEBUG_FS */