Merge tag 'v3.14' into drm-intel-next-queued
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3db7c40cc9aeeac867a7e272a65ebc6c63fb1d05..b66a43b90d1b5dc4321fb48a5e6844873179c37c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1136,7 +1136,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        /* Use the large buffer method to calculate cursor watermark */
        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
-       entries = line_count * 64 * pixel_size;
+       entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
        tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
@@ -1222,7 +1222,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
        *display_wm = entries + display->guard_size;
 
        /* calculate the self-refresh watermark for display cursor */
-       entries = line_count * pixel_size * 64;
+       entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;
 
@@ -1457,7 +1457,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
                              entries, srwm);
 
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                       pixel_size * 64;
+                       pixel_size * to_intel_crtc(crtc)->cursor_width;
                entries = DIV_ROUND_UP(entries,
                                          i965_cursor_wm_info.cacheline_size);
                cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -2120,7 +2120,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
                p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
                p->cur.bytes_per_pixel = 4;
                p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
-               p->cur.horiz_pixels = 64;
+               p->cur.horiz_pixels = intel_crtc->cursor_width;
                /* TODO: for now, assume primary and cursor planes are always enabled. */
                p->pri.enabled = true;
                p->cur.enabled = true;
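The four hunks above replace the hard-coded 64-pixel cursor width in the watermark math with the per-CRTC cursor_width. A minimal standalone sketch of that arithmetic, assuming the usual kernel max() and DIV_ROUND_UP() macros; the wrapper function and its parameter list are illustrative, not one of the driver's actual helpers:

static int cursor_wm_entries(int clock, int htotal, int latency_ns,
			     int cursor_width, int pixel_size,
			     int cacheline_size)
{
	/* time to scan out one line, in microseconds (at least 1) */
	int line_time_us = max(htotal * 1000 / clock, 1);
	/* lines that must stay buffered to ride out the latency window */
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	/* bytes fetched for the cursor over those lines */
	int entries = line_count * cursor_width * pixel_size;

	/* FIFO space is granted in cachelines, so round up */
	return DIV_ROUND_UP(entries, cacheline_size);
}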
@@ -3160,7 +3160,8 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-       I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
+       I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
+                               ~dev_priv->pm_rps_events);
        /* Complete PM interrupt masking here doesn't race with the rps work
         * item again unmasking PM interrupts because that is using a different
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
@@ -3170,7 +3171,7 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
        dev_priv->rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+       I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -3232,12 +3233,12 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
 
        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON(dev_priv->rps.pm_iir);
-       snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
-       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+       snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+       I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
 
        /* only unmask PM interrupts we need. Mask all others. */
-       enabled_intrs = GEN6_PM_RPS_EVENTS;
+       enabled_intrs = dev_priv->pm_rps_events;
 
        /* IVB and SNB hard hangs on looping batchbuffer
         * if GEN6_PM_UP_EI_EXPIRED is masked.
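The interrupt hunks above swap the GEN6_PM_RPS_EVENTS constant for dev_priv->pm_rps_events, so the disable, ack and enable paths all operate on whatever RPS event bits the platform registered. A hedged sketch of the idea; the assignment below only illustrates where such a field would be seeded during interrupt setup and is not a quote of that code:

	/* one place decides which PM interrupt bits count as RPS events ... */
	dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	/* ... and every masking/unmasking/ack site uses the same per-device set */
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);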
@@ -3324,7 +3325,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       u32 rp_state_cap, hw_max, hw_min;
+       u32 rp_state_cap;
        u32 gt_perf_status;
        u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
        u32 gtfifodbg;
@@ -3353,21 +3354,22 @@ static void gen6_enable_rps(struct drm_device *dev)
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
        /* All of these values are in units of 50MHz */
-       dev_priv->rps.cur_freq = 0;
-       /* hw_max = RP0 until we check for overclocking */
-       dev_priv->rps.max_freq = hw_max = rp_state_cap & 0xff;
+       dev_priv->rps.cur_freq          = 0;
        /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
-       dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
-       dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
-       dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-       dev_priv->rps.min_freq = hw_min = (rp_state_cap >> 16) & 0xff;
+       dev_priv->rps.rp1_freq          = (rp_state_cap >>  8) & 0xff;
+       dev_priv->rps.rp0_freq          = (rp_state_cap >>  0) & 0xff;
+       dev_priv->rps.min_freq          = (rp_state_cap >> 16) & 0xff;
+       /* XXX: only BYT has a special efficient freq */
+       dev_priv->rps.efficient_freq    = dev_priv->rps.rp1_freq;
+       /* hw_max = RP0 until we check for overclocking */
+       dev_priv->rps.max_freq          = dev_priv->rps.rp0_freq;
 
        /* Preserve min/max settings in case of re-init */
        if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = hw_max;
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
        if (dev_priv->rps.min_freq_softlimit == 0)
-               dev_priv->rps.min_freq_softlimit = hw_min;
+               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
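The hunk above drops the hw_max/hw_min temporaries and decodes RP_STATE_CAP straight into the dev_priv->rps fields, all in 50 MHz units. A minimal sketch of that decoding; the standalone struct and helper are illustrative only, with the bit positions taken from the diff itself:

struct rps_freqs {
	u8 rp0;		/* highest non-overclocked frequency */
	u8 rp1;		/* treated as the efficient frequency on non-BYT parts */
	u8 min;		/* RPn, the lowest supported frequency */
};

static struct rps_freqs decode_rp_state_cap(u32 rp_state_cap)
{
	struct rps_freqs f;

	f.rp0 = (rp_state_cap >>  0) & 0xff;	/* bits  7:0  */
	f.rp1 = (rp_state_cap >>  8) & 0xff;	/* bits 15:8  */
	f.min = (rp_state_cap >> 16) & 0xff;	/* bits 23:16 */

	return f;
}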
@@ -3597,7 +3599,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0;
+       u32 gtfifodbg, val, rc6_mode = 0;
        int i;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3657,27 +3659,28 @@ static void valleyview_enable_rps(struct drm_device *dev)
                         vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
                         dev_priv->rps.cur_freq);
 
-       dev_priv->rps.max_freq = hw_max = valleyview_rps_max_freq(dev_priv);
+       dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+       dev_priv->rps.rp0_freq  = dev_priv->rps.max_freq;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, hw_max),
-                        hw_max);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+                        dev_priv->rps.max_freq);
 
        dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
                         dev_priv->rps.efficient_freq);
 
-       hw_min = valleyview_rps_min_freq(dev_priv);
+       dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, hw_min),
-                        hw_min);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+                        dev_priv->rps.min_freq);
 
        /* Preserve min/max settings in case of re-init */
        if (dev_priv->rps.max_freq_softlimit == 0)
-               dev_priv->rps.max_freq_softlimit = hw_max;
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
        if (dev_priv->rps.min_freq_softlimit == 0)
-               dev_priv->rps.min_freq_softlimit = hw_min;
+               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
@@ -4879,6 +4882,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
        /* WaDisableSDEUnitClockGating:bdw */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+       /* Wa4x4STCOptimizationDisable:bdw */
+       I915_WRITE(CACHE_MODE_1,
+                  _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
 }
 
 static void haswell_init_clock_gating(struct drm_device *dev)
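The final hunk adds the Wa4x4STCOptimizationDisable:bdw workaround via a masked-bit write to CACHE_MODE_1, a register whose upper 16 bits select which of the lower 16 bits the write actually changes. A hedged sketch of the convention _MASKED_BIT_ENABLE() follows; the SKETCH_ macros below are illustrative re-statements, the real definitions live in the driver's register headers:

#define SKETCH_MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))	/* unmask the bit and set it */
#define SKETCH_MASKED_BIT_DISABLE(bit)	((bit) << 16)		/* unmask the bit and clear it */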