drm/i915/skl: Make sure to allocate minimum sizes in the DDB
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d11cbe9f80c5d1b0496be1c80f811f1a3df6a88..ac3f07e5511985820b11e11beef98dbb74d59e01 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -123,7 +123,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        struct i915_vma *vma;
        int pin_count = 0;
 
-       seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
+       seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
@@ -569,7 +569,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                struct intel_engine_cs *ring =
                                        i915_gem_request_get_ring(work->flip_queued_req);
 
-                               seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
+                               seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
                                           ring->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
@@ -580,7 +580,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                        seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
                                   work->flip_queued_vblank,
                                   work->flip_ready_vblank,
-                                  drm_vblank_count(dev, crtc->pipe));
+                                  drm_crtc_vblank_count(&crtc->base));
                        if (work->enable_stall_check)
                                seq_puts(m, "Stall check enabled, ");
                        else
@@ -658,7 +658,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
                list_for_each_entry(gem_request,
                                    &ring->request_list,
                                    list) {
-                       seq_printf(m, "    %d @ %d\n",
+                       seq_printf(m, "    %x @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
@@ -676,7 +676,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_engine_cs *ring)
 {
        if (ring->get_seqno) {
-               seq_printf(m, "Current sequence (%s): %u\n",
+               seq_printf(m, "Current sequence (%s): %x\n",
                           ring->name, ring->get_seqno(ring, false));
        }
 }
@@ -1105,7 +1105,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                if (ret)
                        goto out;
 
-               gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+               intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
                reqf = I915_READ(GEN6_RPNSWREQ);
                reqf &= ~GEN6_TURBO_DISABLE;
@@ -1113,7 +1113,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                        reqf >>= 24;
                else
                        reqf >>= 25;
-               reqf *= GT_FREQUENCY_MULTIPLIER;
+               reqf = intel_gpu_freq(dev_priv, reqf);
 
                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -1130,9 +1130,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-               cagf *= GT_FREQUENCY_MULTIPLIER;
+               cagf = intel_gpu_freq(dev_priv, cagf);
 
-               gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+               intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
 
                if (IS_GEN6(dev) || IS_GEN7(dev)) {
@@ -1178,18 +1178,18 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
                max_freq = (rp_state_cap & 0xff0000) >> 16;
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
-                          max_freq * GT_FREQUENCY_MULTIPLIER);
+                          intel_gpu_freq(dev_priv, max_freq));
 
                max_freq = (rp_state_cap & 0xff00) >> 8;
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
-                          max_freq * GT_FREQUENCY_MULTIPLIER);
+                          intel_gpu_freq(dev_priv, max_freq));
 
                max_freq = rp_state_cap & 0xff;
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
-                          max_freq * GT_FREQUENCY_MULTIPLIER);
+                          intel_gpu_freq(dev_priv, max_freq));
 
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
-                          dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
+                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
        } else if (IS_VALLEYVIEW(dev)) {
                u32 freq_sts;
 
@@ -1199,16 +1199,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
                seq_printf(m, "max GPU freq: %d MHz\n",
-                          vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
 
                seq_printf(m, "min GPU freq: %d MHz\n",
-                          vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+                          intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
 
-               seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
-                          vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+               seq_printf(m,
+                          "efficient (RPe) frequency: %d MHz\n",
+                          intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 
                seq_printf(m, "current GPU freq: %d MHz\n",
-                          vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+                          intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else {
                seq_puts(m, "no P-state info available\n");
@@ -1219,6 +1220,41 @@ out:
        return ret;
 }
 
+static int i915_hangcheck_info(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+       struct intel_engine_cs *ring;
+       int i;
+
+       if (!i915.enable_hangcheck) {
+               seq_printf(m, "Hangcheck disabled\n");
+               return 0;
+       }
+
+       if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
+               seq_printf(m, "Hangcheck active, fires in %dms\n",
+                          jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+                                           jiffies));
+       } else
+               seq_printf(m, "Hangcheck inactive\n");
+
+       for_each_ring(ring, dev_priv, i) {
+               seq_printf(m, "%s:\n", ring->name);
+               seq_printf(m, "\tseqno = %x [current %x]\n",
+                          ring->hangcheck.seqno, ring->get_seqno(ring, false));
+               seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+               seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+               seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+                          (long long)ring->hangcheck.acthd,
+                          (long long)intel_ring_get_active_head(ring));
+               seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
+                          (long long)ring->hangcheck.max_acthd);
+       }
+
+       return 0;
+}
+
 static int ironlake_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = m->private;
@@ -1288,14 +1324,31 @@ static int ironlake_drpc_info(struct seq_file *m)
        return 0;
 }
 
-static int vlv_drpc_info(struct seq_file *m)
+static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_uncore_forcewake_domain *fw_domain;
+       int i;
 
+       spin_lock_irq(&dev_priv->uncore.lock);
+       for_each_fw_domain(fw_domain, dev_priv, i) {
+               seq_printf(m, "%s.wake_count = %u\n",
+                          intel_uncore_forcewake_domain_to_str(i),
+                          fw_domain->wake_count);
+       }
+       spin_unlock_irq(&dev_priv->uncore.lock);
+
+       return 0;
+}
+
+static int vlv_drpc_info(struct seq_file *m)
+{
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, rcctl1, pw_status;
-       unsigned fw_rendercount = 0, fw_mediacount = 0;
 
        intel_runtime_pm_get(dev_priv);
 
@@ -1327,22 +1380,11 @@ static int vlv_drpc_info(struct seq_file *m)
        seq_printf(m, "Media RC6 residency since boot: %u\n",
                   I915_READ(VLV_GT_MEDIA_RC6));
 
-       spin_lock_irq(&dev_priv->uncore.lock);
-       fw_rendercount = dev_priv->uncore.fw_rendercount;
-       fw_mediacount = dev_priv->uncore.fw_mediacount;
-       spin_unlock_irq(&dev_priv->uncore.lock);
-
-       seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
-       seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
-
-
-       return 0;
+       return i915_forcewake_domains(m, NULL);
 }
 
-
 static int gen6_drpc_info(struct seq_file *m)
 {
-
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1356,7 +1398,7 @@ static int gen6_drpc_info(struct seq_file *m)
        intel_runtime_pm_get(dev_priv);
 
        spin_lock_irq(&dev_priv->uncore.lock);
-       forcewake_count = dev_priv->uncore.forcewake_count;
+       forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
        spin_unlock_irq(&dev_priv->uncore.lock);
 
        if (forcewake_count) {
@@ -1671,7 +1713,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                                       GEN6_PCODE_READ_MIN_FREQ_TABLE,
                                       &ia_freq);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-                          gpu_freq * GT_FREQUENCY_MULTIPLIER,
+                          intel_gpu_freq(dev_priv, gpu_freq),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
        }
@@ -1724,11 +1766,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);
 
-       seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+       seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel,
+                  fb->base.modifier[0],
                   atomic_read(&fb->base.refcount.refcount));
        describe_obj(m, fb->obj);
        seq_putc(m, '\n');
@@ -1739,11 +1782,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                if (ifbdev && &fb->base == ifbdev->helper.fb)
                        continue;
 
-               seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+               seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel,
+                          fb->base.modifier[0],
                           atomic_read(&fb->base.refcount.refcount));
                describe_obj(m, fb->obj);
                seq_putc(m, '\n');
@@ -1928,7 +1972,7 @@ static int i915_execlists(struct seq_file *m, void *data)
        intel_runtime_pm_get(dev_priv);
 
        for_each_ring(ring, dev_priv, ring_id) {
-               struct intel_ctx_submit_request *head_req = NULL;
+               struct drm_i915_gem_request *head_req = NULL;
                int count = 0;
                unsigned long flags;
 
@@ -1961,7 +2005,7 @@ static int i915_execlists(struct seq_file *m, void *data)
                list_for_each(cursor, &ring->execlist_queue)
                        count++;
                head_req = list_first_entry_or_null(&ring->execlist_queue,
-                               struct intel_ctx_submit_request, execlist_link);
+                               struct drm_i915_gem_request, execlist_link);
                spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
                seq_printf(m, "\t%d requests in queue\n", count);
@@ -1984,30 +2028,6 @@ static int i915_execlists(struct seq_file *m, void *data)
        return 0;
 }
 
-static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
-
-       spin_lock_irq(&dev_priv->uncore.lock);
-       if (IS_VALLEYVIEW(dev)) {
-               fw_rendercount = dev_priv->uncore.fw_rendercount;
-               fw_mediacount = dev_priv->uncore.fw_mediacount;
-       } else
-               forcewake_count = dev_priv->uncore.forcewake_count;
-       spin_unlock_irq(&dev_priv->uncore.lock);
-
-       if (IS_VALLEYVIEW(dev)) {
-               seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
-               seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
-       } else
-               seq_printf(m, "forcewake count = %u\n", forcewake_count);
-
-       return 0;
-}
-
 static const char *swizzle_string(unsigned swizzle)
 {
        switch (swizzle) {
@@ -2400,6 +2420,14 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
                return "AUDIO";
        case POWER_DOMAIN_PLLS:
                return "PLLS";
+       case POWER_DOMAIN_AUX_A:
+               return "AUX_A";
+       case POWER_DOMAIN_AUX_B:
+               return "AUX_B";
+       case POWER_DOMAIN_AUX_C:
+               return "AUX_C";
+       case POWER_DOMAIN_AUX_D:
+               return "AUX_D";
        case POWER_DOMAIN_INIT:
                return "INIT";
        default:
@@ -2628,7 +2656,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
 
                seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
                           crtc->base.base.id, pipe_name(crtc->pipe),
-                          yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
+                          yesno(crtc->active), crtc->config->pipe_src_w,
+                          crtc->config->pipe_src_h);
                if (crtc->active) {
                        intel_crtc_info(m, crtc);
 
@@ -3362,9 +3391,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
         * relevant on hsw with pipe A when using the always-on power well
         * routing.
         */
-       if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
-           !crtc->config.pch_pfit.enabled) {
-               crtc->config.pch_pfit.force_thru = true;
+       if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
+           !crtc->config->pch_pfit.enabled) {
+               crtc->config->pch_pfit.force_thru = true;
 
                intel_display_power_get(dev_priv,
                                        POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
@@ -3388,8 +3417,8 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
         * relevant on hsw with pipe A when using the always-on power well
         * routing.
         */
-       if (crtc->config.pch_pfit.force_thru) {
-               crtc->config.pch_pfit.force_thru = false;
+       if (crtc->config->pch_pfit.force_thru) {
+               crtc->config->pch_pfit.force_thru = false;
 
                dev_priv->display.crtc_disable(&crtc->base);
                dev_priv->display.crtc_enable(&crtc->base);
@@ -3942,6 +3971,17 @@ i915_wedged_set(void *data, u64 val)
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /*
+        * There is no safeguard against this debugfs entry colliding
+        * with the hangcheck calling same i915_handle_error() in
+        * parallel, causing an explosion. For now we assume that the
+        * test harness is responsible enough not to inject gpu hangs
+        * while it is writing to 'i915_wedged'
+        */
+
+       if (i915_reset_in_progress(&dev_priv->gpu_error))
+               return -EAGAIN;
+
        intel_runtime_pm_get(dev_priv);
 
        i915_handle_error(dev, val,
@@ -4128,10 +4168,7 @@ i915_max_freq_get(void *data, u64 *val)
        if (ret)
                return ret;
 
-       if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
-       else
-               *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+       *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -4160,12 +4197,12 @@ i915_max_freq_set(void *data, u64 val)
         * Turbo will still be enabled, but won't go above the set value.
         */
        if (IS_VALLEYVIEW(dev)) {
-               val = vlv_freq_opcode(dev_priv, val);
+               val = intel_freq_opcode(dev_priv, val);
 
                hw_max = dev_priv->rps.max_freq;
                hw_min = dev_priv->rps.min_freq;
        } else {
-               do_div(val, GT_FREQUENCY_MULTIPLIER);
+               val = intel_freq_opcode(dev_priv, val);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                hw_max = dev_priv->rps.max_freq;
@@ -4179,10 +4216,7 @@ i915_max_freq_set(void *data, u64 val)
 
        dev_priv->rps.max_freq_softlimit = val;
 
-       if (IS_VALLEYVIEW(dev))
-               valleyview_set_rps(dev, val);
-       else
-               gen6_set_rps(dev, val);
+       intel_set_rps(dev, val);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -4209,10 +4243,7 @@ i915_min_freq_get(void *data, u64 *val)
        if (ret)
                return ret;
 
-       if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
-       else
-               *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+       *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -4241,12 +4272,12 @@ i915_min_freq_set(void *data, u64 val)
         * Turbo will still be enabled, but won't go below the set value.
         */
        if (IS_VALLEYVIEW(dev)) {
-               val = vlv_freq_opcode(dev_priv, val);
+               val = intel_freq_opcode(dev_priv, val);
 
                hw_max = dev_priv->rps.max_freq;
                hw_min = dev_priv->rps.min_freq;
        } else {
-               do_div(val, GT_FREQUENCY_MULTIPLIER);
+               val = intel_freq_opcode(dev_priv, val);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                hw_max = dev_priv->rps.max_freq;
@@ -4260,10 +4291,7 @@ i915_min_freq_set(void *data, u64 val)
 
        dev_priv->rps.min_freq_softlimit = val;
 
-       if (IS_VALLEYVIEW(dev))
-               valleyview_set_rps(dev, val);
-       else
-               gen6_set_rps(dev, val);
+       intel_set_rps(dev, val);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -4330,6 +4358,85 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
 
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+
+       if (INTEL_INFO(dev)->gen < 9)
+               return -ENODEV;
+
+       seq_puts(m, "SSEU Device Info\n");
+       seq_printf(m, "  Available Slice Total: %u\n",
+                  INTEL_INFO(dev)->slice_total);
+       seq_printf(m, "  Available Subslice Total: %u\n",
+                  INTEL_INFO(dev)->subslice_total);
+       seq_printf(m, "  Available Subslice Per Slice: %u\n",
+                  INTEL_INFO(dev)->subslice_per_slice);
+       seq_printf(m, "  Available EU Total: %u\n",
+                  INTEL_INFO(dev)->eu_total);
+       seq_printf(m, "  Available EU Per Subslice: %u\n",
+                  INTEL_INFO(dev)->eu_per_subslice);
+       seq_printf(m, "  Has Slice Power Gating: %s\n",
+                  yesno(INTEL_INFO(dev)->has_slice_pg));
+       seq_printf(m, "  Has Subslice Power Gating: %s\n",
+                  yesno(INTEL_INFO(dev)->has_subslice_pg));
+       seq_printf(m, "  Has EU Power Gating: %s\n",
+                  yesno(INTEL_INFO(dev)->has_eu_pg));
+
+       seq_puts(m, "SSEU Device Status\n");
+       if (IS_SKYLAKE(dev)) {
+               const int s_max = 3, ss_max = 4;
+               int s, ss;
+               u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+               s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
+               s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
+               s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
+               eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
+               eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
+               eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
+               eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
+               eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
+               eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
+               eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+                            GEN9_PGCTL_SSA_EU19_ACK |
+                            GEN9_PGCTL_SSA_EU210_ACK |
+                            GEN9_PGCTL_SSA_EU311_ACK;
+               eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+                            GEN9_PGCTL_SSB_EU19_ACK |
+                            GEN9_PGCTL_SSB_EU210_ACK |
+                            GEN9_PGCTL_SSB_EU311_ACK;
+
+               for (s = 0; s < s_max; s++) {
+                       if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+                               /* skip disabled slice */
+                               continue;
+
+                       s_tot++;
+                       ss_per = INTEL_INFO(dev)->subslice_per_slice;
+                       ss_tot += ss_per;
+                       for (ss = 0; ss < ss_max; ss++) {
+                               unsigned int eu_cnt;
+
+                               eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+                                                      eu_mask[ss%2]);
+                               eu_tot += eu_cnt;
+                               eu_per = max(eu_per, eu_cnt);
+                       }
+               }
+       }
+       seq_printf(m, "  Enabled Slice Total: %u\n", s_tot);
+       seq_printf(m, "  Enabled Subslice Total: %u\n", ss_tot);
+       seq_printf(m, "  Enabled Subslice Per Slice: %u\n", ss_per);
+       seq_printf(m, "  Enabled EU Total: %u\n", eu_tot);
+       seq_printf(m, "  Enabled EU Per Subslice: %u\n", eu_per);
+
+       return 0;
+}
+
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
@@ -4338,7 +4445,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+       intel_runtime_pm_get(dev_priv);
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        return 0;
 }
@@ -4351,7 +4459,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
-       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_runtime_pm_put(dev_priv);
 
        return 0;
 }
@@ -4414,6 +4523,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
        {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
        {"i915_frequency_info", i915_frequency_info, 0},
+       {"i915_hangcheck_info", i915_hangcheck_info, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_ring_freq_table", i915_ring_freq_table, 0},
@@ -4425,7 +4535,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_context_status", i915_context_status, 0},
        {"i915_dump_lrc", i915_dump_lrc, 0},
        {"i915_execlists", i915_execlists, 0},
-       {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+       {"i915_forcewake_domains", i915_forcewake_domains, 0},
        {"i915_swizzle_info", i915_swizzle_info, 0},
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_llc", i915_llc, 0},
@@ -4440,6 +4550,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_dp_mst_info", i915_dp_mst_info, 0},
        {"i915_wa_registers", i915_wa_registers, 0},
        {"i915_ddb_info", i915_ddb_info, 0},
+       {"i915_sseu_status", i915_sseu_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 