diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index cf39ed3133d6..644e80ba13e0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 	u64 size = 0;
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    drm_mm_node_allocated(&vma->node))
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -130,10 +129,12 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
-	int i;
+	enum intel_engine_id id;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
 	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
 		   &obj->base,
@@ -144,9 +145,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-			   i915_gem_request_get_seqno(obj->last_read_req[i]));
+			   i915_gem_request_get_seqno(obj->last_read_req[id]));
 	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -155,7 +156,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->pin_count > 0)
 			pin_count++;
 	}
@@ -164,14 +165,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-			   i915_is_ggtt(vma->vm) ? "g" : "pp",
+			   vma->is_ggtt ? "g" : "pp",
"g" : "pp", vma->node.start, vma->node.size); - if (i915_is_ggtt(vma->vm)) - seq_printf(m, ", type: %u)", vma->ggtt_view.type); - else - seq_puts(m, ")"); + if (vma->is_ggtt) + seq_printf(m, ", type: %u", vma->ggtt_view.type); + seq_puts(m, ")"); } if (obj->stolen) seq_printf(m, " (stolen: %08llx)", obj->stolen->start); @@ -186,7 +186,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) } if (obj->last_write_req != NULL) seq_printf(m, " (%s)", - i915_gem_request_get_ring(obj->last_write_req)->name); + i915_gem_request_get_engine(obj->last_write_req)->name); if (obj->frontbuffer_bits) seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } @@ -204,8 +204,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) uintptr_t list = (uintptr_t) node->info_ent->data; struct list_head *head; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; u64 total_obj_size, total_gtt_size; int count, ret; @@ -218,11 +218,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) switch (list) { case ACTIVE_LIST: seq_puts(m, "Active:\n"); - head = &vm->active_list; + head = &ggtt->base.active_list; break; case INACTIVE_LIST: seq_puts(m, "Inactive:\n"); - head = &vm->inactive_list; + head = &ggtt->base.inactive_list; break; default: mutex_unlock(&dev->struct_mutex); @@ -230,7 +230,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(vma, head, mm_list) { + list_for_each_entry(vma, head, vm_link) { seq_printf(m, " "); describe_obj(m, vma->obj); seq_printf(m, "\n"); @@ -342,13 +342,13 @@ static int per_file_stats(int id, void *ptr, void *data) stats->shared += obj->base.size; if (USES_FULL_PPGTT(obj->base.dev)) { - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { struct i915_hw_ppgtt *ppgtt; if (!drm_mm_node_allocated(&vma->node)) continue; - if (i915_is_ggtt(vma->vm)) { + if (vma->is_ggtt) { stats->global += obj->base.size; continue; } @@ -399,15 +399,15 @@ static void print_batch_pool_stats(struct seq_file *m, { struct drm_i915_gem_object *obj; struct file_stats stats; - struct intel_engine_cs *ring; - int i, j; + struct intel_engine_cs *engine; + int j; memset(&stats, 0, sizeof(stats)); - for_each_ring(ring, dev_priv, i) { - for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { + for_each_engine(engine, dev_priv) { + for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { list_for_each_entry(obj, - &ring->batch_pool.cache_list[j], + &engine->batch_pool.cache_list[j], batch_pool_link) per_file_stats(0, obj, &stats); } @@ -431,11 +431,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; u32 count, mappable_count, purgeable_count; u64 size, mappable_size, purgeable_size; struct drm_i915_gem_object *obj; - struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_file *file; struct i915_vma *vma; int ret; @@ -454,12 +454,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data) count, mappable_count, size, 
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->active_list, mm_list);
+	count_vmas(&ggtt->base.active_list, vm_link);
 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->inactive_list, mm_list);
+	count_vmas(&ggtt->base.inactive_list, vm_link);
 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -494,8 +494,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, size);
 
 	seq_printf(m, "%llu [%llu] gtt total\n",
-		   dev_priv->gtt.base.total,
-		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
 
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
@@ -593,14 +592,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 					   pipe, plane);
 			}
 			if (work->flip_queued_req) {
-				struct intel_engine_cs *ring =
-					i915_gem_request_get_ring(work->flip_queued_req);
+				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
 
 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
-					   ring->name,
+					   engine->name,
 					   i915_gem_request_get_seqno(work->flip_queued_req),
 					   dev_priv->next_seqno,
-					   ring->get_seqno(ring, true),
+					   engine->get_seqno(engine),
 					   i915_gem_request_completed(work->flip_queued_req, true));
 			} else
 				seq_printf(m, "Flip not associated with any ring\n");
@@ -639,28 +637,28 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int total = 0;
-	int ret, i, j;
+	int ret, j;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			int count;
 
 			count = 0;
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link)
 				count++;
 			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   ring->name, j, count);
+				   engine->name, j, count);
 
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link) {
 				seq_puts(m, "   ");
 				describe_obj(m, obj);
@@ -683,26 +681,26 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
-	int ret, any, i;
+	int ret, any;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
 	any = 0;
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		int count;
 
 		count = 0;
-		list_for_each_entry(req, &ring->request_list, list)
+		list_for_each_entry(req, &engine->request_list, list)
 			count++;
 		if (count == 0)
 			continue;
 
-		seq_printf(m, "%s requests: %d\n", ring->name, count);
-		list_for_each_entry(req, &ring->request_list, list) {
+		seq_printf(m, "%s requests: %d\n", engine->name, count);
+		list_for_each_entry(req, &engine->request_list, list) {
 			struct task_struct *task;
 
 			rcu_read_lock();
@@ -728,12 +726,12 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-				 struct intel_engine_cs *ring)
+				 struct intel_engine_cs *engine)
 {
-	if (ring->get_seqno) {
-		seq_printf(m, "Current sequence (%s): %x\n",
-			   ring->name, ring->get_seqno(ring, false));
-	}
+	seq_printf(m, "Current sequence (%s): %x\n",
+		   engine->name, engine->get_seqno(engine));
+	seq_printf(m, "Current user interrupts (%s): %x\n",
+		   engine->name, READ_ONCE(engine->user_interrupts));
 }
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -741,16 +739,16 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i;
+	struct intel_engine_cs *engine;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_ring_seqno_info(m, ring);
+	for_each_engine(engine, dev_priv)
+		i915_ring_seqno_info(m, engine);
 
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -764,7 +762,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -936,13 +934,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask: %08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
-				   ring->name, I915_READ_IMR(ring));
+				   engine->name, I915_READ_IMR(engine));
 		}
-		i915_ring_seqno_info(m, ring);
+		i915_ring_seqno_info(m, engine);
 	}
 
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -983,12 +981,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
 
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = ring->status_page.page_addr;
+	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+	hws = engine->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -1333,10 +1331,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	u64 acthd[I915_NUM_RINGS];
-	u32 seqno[I915_NUM_RINGS];
-	int i;
+	struct intel_engine_cs *engine;
+	u64 acthd[I915_NUM_ENGINES];
+	u32 seqno[I915_NUM_ENGINES];
+	u32 instdone[I915_NUM_INSTDONE_REG];
+	enum intel_engine_id id;
+	int j;
 
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
@@ -1345,11 +1345,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i) {
-		seqno[i] = ring->get_seqno(ring, false);
-		acthd[i] = intel_ring_get_active_head(ring);
+	for_each_engine_id(engine, dev_priv, id) {
+		acthd[id] = intel_ring_get_active_head(engine);
+		seqno[id] = engine->get_seqno(engine);
 	}
 
+	i915_get_extra_instdone(dev, instdone);
+
 	intel_runtime_pm_put(dev_priv);
 
 	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
@@ -1359,17 +1361,35 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	} else
 		seq_printf(m, "Hangcheck inactive\n");
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s:\n", ring->name);
-		seq_printf(m, "\tseqno = %x [current %x]\n",
-			   ring->hangcheck.seqno, seqno[i]);
+	for_each_engine_id(engine, dev_priv, id) {
+		seq_printf(m, "%s:\n", engine->name);
+		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+			   engine->hangcheck.seqno,
+			   seqno[id],
+			   engine->last_submitted_seqno);
+		seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+			   engine->hangcheck.user_interrupts,
+			   READ_ONCE(engine->user_interrupts));
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
-			   (long long)ring->hangcheck.acthd,
-			   (long long)acthd[i]);
-		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
-			   (long long)ring->hangcheck.max_acthd);
-		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
-		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+			   (long long)engine->hangcheck.acthd,
+			   (long long)acthd[id]);
+		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
+		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
+
+		if (engine->id == RCS) {
+			seq_puts(m, "\tinstdone read =");
+
+			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+				seq_printf(m, " 0x%08x", instdone[j]);
+
+			seq_puts(m, "\n\tinstdone accu =");
+
+			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+				seq_printf(m, " 0x%08x",
+					   engine->hangcheck.instdone[j]);
+
+			seq_puts(m, "\n");
+		}
 	}
 
 	return 0;
@@ -1881,6 +1901,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct intel_framebuffer *fbdev_fb = NULL;
 	struct drm_framebuffer *drm_fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (to_i915(dev)->fbdev) {
@@ -1915,6 +1940,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		seq_putc(m, '\n');
 	}
 	mutex_unlock(&dev->mode_config.fb_lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -1932,9 +1958,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
-	int ret, i;
+	enum intel_engine_id id;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -1947,21 +1974,18 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context == ctx)
-				seq_printf(m, "(default context %s) ",
-					   ring->name);
-		}
+		if (ctx == dev_priv->kernel_context)
+			seq_printf(m, "(kernel context) ");
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
-			for_each_ring(ring, dev_priv, i) {
+			for_each_engine_id(engine, dev_priv, id) {
 				struct drm_i915_gem_object *ctx_obj =
-					ctx->engine[i].state;
+					ctx->engine[id].state;
 				struct intel_ringbuffer *ringbuf =
-					ctx->engine[i].ringbuf;
+					ctx->engine[id].ringbuf;
 
-				seq_printf(m, "%s: ", ring->name);
+				seq_printf(m, "%s: ", engine->name);
 				if (ctx_obj)
 					describe_obj(m, ctx_obj);
 				if (ringbuf)
@@ -1981,22 +2005,23 @@ static int i915_context_status(struct seq_file *m, void *unused)
 }
 
 static void i915_dump_lrc_obj(struct seq_file *m,
-			      struct intel_engine_cs *ring,
-			      struct drm_i915_gem_object *ctx_obj)
+			      struct intel_context *ctx,
+			      struct intel_engine_cs *engine)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
 		seq_printf(m, "Context on %s with no gem object\n",
-			   ring->name);
+			   engine->name);
 		return;
 	}
 
-	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx_obj));
+	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+		   intel_execlists_ctx_id(ctx, engine));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
@@ -2029,9 +2054,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
-	int ret, i;
+	int ret;
 
 	if (!i915.enable_execlists) {
 		seq_printf(m, "Logical Ring Contexts are disabled\n");
@@ -2042,13 +2067,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	if (ret)
 		return ret;
 
-	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context != ctx)
-				i915_dump_lrc_obj(m, ring,
-						  ctx->engine[i].state);
-		}
-	}
+	list_for_each_entry(ctx, &dev_priv->context_list, link)
+		if (ctx != dev_priv->kernel_context)
+			for_each_engine(engine, dev_priv)
+				i915_dump_lrc_obj(m, ctx, engine);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2060,15 +2082,14 @@ static int i915_execlists(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
 	u32 status;
 	u32 ctx_id;
 	struct list_head *cursor;
-	int ring_id, i;
-	int ret;
+	int i, ret;
 
 	if (!i915.enable_execlists) {
 		seq_puts(m, "Logical Ring Contexts are disabled\n");
@@ -2081,50 +2102,47 @@ static int i915_execlists(struct seq_file *m, void *data)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, ring_id) {
+	for_each_engine(engine, dev_priv) {
 		struct drm_i915_gem_request *head_req = NULL;
 		int count = 0;
-		unsigned long flags;
 
-		seq_printf(m, "%s\n", ring->name);
+		seq_printf(m, "%s\n", engine->name);
 
-		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
-		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
 			   status, ctx_id);
 
-		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-		read_pointer = ring->next_context_status_buffer;
-		write_pointer = status_pointer & 0x07;
+		read_pointer = engine->next_context_status_buffer;
+		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
-			write_pointer += 6;
+			write_pointer += GEN8_CSB_ENTRIES;
 		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
 			   read_pointer, write_pointer);
 
-		for (i = 0; i < 6; i++) {
-			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
-			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
+			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
+			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
 
 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
 				   i, status, ctx_id);
 		}
 
-		spin_lock_irqsave(&ring->execlist_lock, flags);
-		list_for_each(cursor, &ring->execlist_queue)
+		spin_lock_bh(&engine->execlist_lock);
+		list_for_each(cursor, &engine->execlist_queue)
 			count++;
-		head_req = list_first_entry_or_null(&ring->execlist_queue,
-				struct drm_i915_gem_request, execlist_link);
-		spin_unlock_irqrestore(&ring->execlist_lock, flags);
+		head_req = list_first_entry_or_null(&engine->execlist_queue,
+						    struct drm_i915_gem_request,
+						    execlist_link);
+		spin_unlock_bh(&engine->execlist_lock);
 
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
-			struct drm_i915_gem_object *ctx_obj;
-
-			ctx_obj = head_req->ctx->engine[ring_id].state;
 			seq_printf(m, "\tHead request id: %u\n",
-				   intel_execlists_ctx_id(ctx_obj));
+				   intel_execlists_ctx_id(head_req->ctx, engine));
 			seq_printf(m, "\tHead request tail: %u\n",
 				   head_req->tail);
 		}
@@ -2240,19 +2258,19 @@ static int per_file_ctx(int id, void *ptr, void *data)
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int unused, i;
+	int i;
 
 	if (!ppgtt)
 		return;
 
-	for_each_ring(ring, dev_priv, unused) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv) {
+		seq_printf(m, "%s\n", engine->name);
 		for (i = 0; i < 4; i++) {
-			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
 			pdp <<= 32;
-			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
@@ -2261,19 +2279,22 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv) {
+		seq_printf(m, "%s\n", engine->name);
 		if (INTEL_INFO(dev)->gen == 7)
-			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
-		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
-		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
-		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+			seq_printf(m, "GFX_MODE: 0x%08x\n",
+				   I915_READ(RING_MODE_GEN7(engine)));
+		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE(engine)));
+		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
+		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2328,12 +2349,11 @@ out_put:
 
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int count = 0;
-	int i;
 
-	for_each_ring(ring, i915, i)
-		count += ring->irq_refcount;
+	for_each_engine(engine, i915)
+		count += engine->irq_refcount;
 
 	return count;
 }
@@ -2400,7 +2420,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	u32 tmp, i;
 
-	if (!HAS_GUC_UCODE(dev_priv->dev))
+	if (!HAS_GUC_UCODE(dev_priv))
 		return 0;
 
 	seq_printf(m, "GuC firmware status:\n");
@@ -2441,9 +2461,8 @@ static void i915_guc_client_info(struct seq_file *m,
 				 struct drm_i915_private *dev_priv,
 				 struct i915_guc_client *client)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint64_t tot = 0;
-	uint32_t i;
 
 	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
 		client->priority, client->ctx_index, client->proc_desc_offset);
@@ -2456,11 +2475,11 @@ static void i915_guc_client_info(struct seq_file *m,
 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-				client->submissions[i],
-				ring->name);
-		tot += client->submissions[i];
+				client->submissions[engine->guc_id],
+				engine->name);
+		tot += client->submissions[engine->guc_id];
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2472,11 +2491,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc guc;
 	struct i915_guc_client client = {};
-	struct intel_engine_cs *ring;
-	enum intel_ring_id i;
+	struct intel_engine_cs *engine;
 	u64 total = 0;
 
-	if (!HAS_GUC_SCHED(dev_priv->dev))
+	if (!HAS_GUC_SCHED(dev_priv))
 		return 0;
 
 	if (mutex_lock_interruptible(&dev->struct_mutex))
@@ -2496,11 +2514,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 
 	seq_printf(m, "\nGuC submissions:\n");
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
-			ring->name, guc.submissions[i],
-			guc.last_seqno[i], guc.last_seqno[i]);
-		total += guc.submissions[i];
+	for_each_engine(engine, dev_priv) {
+		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
+			engine->name, guc.submissions[engine->guc_id],
+			guc.last_seqno[engine->guc_id]);
+		total += guc.submissions[engine->guc_id];
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2578,6 +2596,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 				enabled = true;
 		}
 	}
+
+	seq_printf(m, "Main link in standby mode: %s\n",
+		   yesno(dev_priv->psr.link_standby));
+
 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
 
 	if (!HAS_DDI(dev))
@@ -2676,10 +2698,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!HAS_RUNTIME_PM(dev)) {
-		seq_puts(m, "not supported\n");
-		return 0;
-	}
+	if (!HAS_RUNTIME_PM(dev_priv))
+		seq_puts(m, "Runtime power management not supported\n");
 
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
@@ -2690,6 +2710,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 #else
 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
+	seq_printf(m, "PCI device power state: %s [%d]\n",
+		   pci_power_name(dev_priv->dev->pdev->current_state),
+		   dev_priv->dev->pdev->current_state);
 
 	return 0;
 }
@@ -3118,9 +3141,10 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-	int i, j, ret;
+	enum intel_engine_id id;
+	int j, ret;
 
 	if (!i915_semaphore_is_enabled(dev)) {
 		seq_puts(m, "Semaphores are disabled\n");
@@ -3139,14 +3163,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_ring(ring, dev_priv, i) {
+		for_each_engine_id(engine, dev_priv, id) {
 			uint64_t offset;
 
-			seq_printf(m, "%s\n", ring->name);
+			seq_printf(m, "%s\n", engine->name);
 
 			seq_puts(m, " Last signal:");
 			for (j = 0; j < num_rings; j++) {
-				offset = i * I915_NUM_RINGS + j;
+				offset = id * I915_NUM_ENGINES + j;
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3154,7 +3178,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
 			seq_puts(m, " Last wait: ");
 			for (j = 0; j < num_rings; j++) {
-				offset = i + (j * I915_NUM_RINGS);
+				offset = id + (j * I915_NUM_ENGINES);
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3164,18 +3188,18 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, " Last signal:");
-		for_each_ring(ring, dev_priv, i)
+		for_each_engine(engine, dev_priv)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
-					   I915_READ(ring->semaphore.mbox.signal[j]));
+					   I915_READ(engine->semaphore.mbox.signal[j]));
 		seq_putc(m, '\n');
 	}
 
 	seq_puts(m, "\nSync seqno:\n");
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < num_rings; j++) {
-			seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
-		}
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < num_rings; j++)
+			seq_printf(m, " 0x%08x ",
+				   engine->semaphore.sync_seqno[j]);
 		seq_putc(m, '\n');
 	}
 	seq_putc(m, '\n');
@@ -3197,8 +3221,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
-		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
-			   pll->config.crtc_mask, pll->active, yesno(pll->on));
+		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
 		seq_printf(m, " tracked hardware state:\n");
 		seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
 		seq_printf(m, " dpll_md: 0x%08x\n",
@@ -3216,9 +3240,12 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 {
 	int i;
 	int ret;
+	struct intel_engine_cs *engine;
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_workarounds *workarounds = &dev_priv->workarounds;
+	enum intel_engine_id id;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -3226,15 +3253,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
-	for (i = 0; i < dev_priv->workarounds.count; ++i) {
+	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
+	for_each_engine_id(engine, dev_priv, id)
+		seq_printf(m, "HW whitelist count for %s: %d\n",
+			   engine->name, workarounds->hw_whitelist_count[id]);
+	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
 		bool ok;
 
-		addr = dev_priv->workarounds.reg[i].addr;
-		mask = dev_priv->workarounds.reg[i].mask;
-		value = dev_priv->workarounds.reg[i].value;
+		addr = workarounds->reg[i].addr;
+		mask = workarounds->reg[i].mask;
+		value = workarounds->reg[i].value;
 		read = I915_READ(addr);
 		ok = (value & mask) == (read & mask);
 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
@@ -3416,7 +3446,8 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 		intel_dig_port = enc_to_dig_port(encoder);
 		if (!intel_dig_port->dp.can_mst)
 			continue;
-
+		seq_printf(m, "MST Source Port %c\n",
+			   port_name(intel_dig_port->port));
 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
 	}
 	drm_modeset_unlock_all(dev);