drm/i915: s/pm._irqs_disabled/pm.irqs_enabled/
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 474e6155b7c4e124d727e7fdd787a1f3b3caa636..6fc77a100cc66ea9d29fb00df145e4d44e6894e7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -76,8 +76,6 @@ static const uint32_t intel_cursor_formats[] = {
 #define DIV_ROUND_CLOSEST_ULL(ll, d)   \
 ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 
-static void intel_increase_pllclock(struct drm_device *dev,
-                                   enum pipe pipe);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
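
The DIV_ROUND_CLOSEST_ULL() macro kept in the context above rounds a 64-bit division to the nearest integer by adding half the divisor before dividing. A standalone equivalent in plain C (hypothetical helper name, no do_div(), not part of this diff):

	/* Standalone equivalent of DIV_ROUND_CLOSEST_ULL(), without do_div(). */
	static unsigned long long div_round_closest_ull(unsigned long long ll,
							unsigned long long d)
	{
		return (ll + d / 2) / d;	/* add half the divisor, then truncate */
	}

	/* Examples: div_round_closest_ull(7, 2) == 4, div_round_closest_ull(9, 4) == 2 */
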
@@ -892,19 +890,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
        return intel_crtc->config.cpu_transcoder;
 }
 
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe.  Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-       drm_wait_one_vblank(dev, pipe);
-}
-
 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1151,8 +1136,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
             state_string(state), state_string(cur_state));
 }
 
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
-                                 enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+                          enum pipe pipe)
 {
        struct drm_device *dev = dev_priv->dev;
        int pp_reg;
@@ -1225,7 +1210,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                state = true;
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
                cur_state = false;
        } else {
@@ -1294,7 +1279,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
        int reg, sprite;
        u32 val;
 
-       if (IS_VALLEYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               for_each_sprite(pipe, sprite) {
+                       val = I915_READ(PLANE_CTL(pipe, sprite));
+                       WARN(val & PLANE_CTL_ENABLE,
+                            "plane %d assertion failure, should be off on pipe %c but is still active\n",
+                            sprite, pipe_name(pipe));
+               }
+       } else if (IS_VALLEYVIEW(dev)) {
                for_each_sprite(pipe, sprite) {
                        reg = SPCNTR(pipe, sprite);
                        val = I915_READ(reg);
@@ -2195,7 +2187,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 
        switch (obj->tiling_mode) {
        case I915_TILING_NONE:
-               if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
                else if (INTEL_INFO(dev)->gen >= 4)
                        alignment = 4 * 1024;
@@ -2203,8 +2197,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                        alignment = 64 * 1024;
                break;
        case I915_TILING_X:
-               /* pin() will align the object as required by fence */
-               alignment = 0;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else {
+                       /* pin() will align the object as required by fence */
+                       alignment = 0;
+               }
                break;
        case I915_TILING_Y:
                WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@ -2634,6 +2632,90 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
        POSTING_READ(reg);
 }
 
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj;
+       int pipe = intel_crtc->pipe;
+       u32 plane_ctl, stride;
+
+       if (!intel_crtc->primary_enabled) {
+               I915_WRITE(PLANE_CTL(pipe, 0), 0);
+               I915_WRITE(PLANE_SURF(pipe, 0), 0);
+               POSTING_READ(PLANE_CTL(pipe, 0));
+               return;
+       }
+
+       plane_ctl = PLANE_CTL_ENABLE |
+                   PLANE_CTL_PIPE_GAMMA_ENABLE |
+                   PLANE_CTL_PIPE_CSC_ENABLE;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+               plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       default:
+               BUG();
+       }
+
+       intel_fb = to_intel_framebuffer(fb);
+       obj = intel_fb->obj;
+
+       /*
+        * The stride is expressed either as a multiple of 64 byte chunks for
+        * linear buffers or as a number of tiles for tiled buffers.
+        */
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               plane_ctl |= PLANE_CTL_TILED_X;
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               BUG();
+       }
+
+       plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+       I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+
+       DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+                     i915_gem_obj_ggtt_offset(obj),
+                     x, y, fb->width, fb->height,
+                     fb->pitches[0]);
+
+       I915_WRITE(PLANE_POS(pipe, 0), 0);
+       I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+       I915_WRITE(PLANE_SIZE(pipe, 0),
+                  (intel_crtc->config.pipe_src_h - 1) << 16 |
+                  (intel_crtc->config.pipe_src_w - 1));
+       I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+       I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+
+       POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
 static int
 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
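
The stride programming in skylake_update_primary_plane() above writes PLANE_STRIDE in 64-byte units for linear buffers and in whole tiles for X-tiled buffers (an X tile row is 512 bytes wide), hence the >> 6 and >> 9 shifts. A small standalone sketch of that arithmetic, with a hypothetical helper name and assuming those unit sizes:

	#include <assert.h>

	/* Hypothetical helper mirroring the shift logic above. */
	static unsigned int skl_plane_stride_units(unsigned int pitch_bytes, int x_tiled)
	{
		if (x_tiled)
			return pitch_bytes / 512;	/* one X tile row = 512 bytes */
		return pitch_bytes / 64;		/* linear: 64-byte units */
	}

	int main(void)
	{
		/* 1920 XRGB8888 pixels = 7680 bytes per scanline */
		assert(skl_plane_stride_units(7680, 0) == (7680 >> 6));	/* 120 */
		assert(skl_plane_stride_units(7680, 1) == (7680 >> 9));	/* 15 */
		return 0;
	}
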
@@ -2644,7 +2726,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        if (dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);
-       intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -2724,16 +2805,15 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
        bool pending;
 
        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
            intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
                return false;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        pending = to_intel_crtc(crtc)->unpin_work != NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        return pending;
 }
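
The conversions from spin_lock_irqsave() to spin_lock_irq() in this and the following hunks rely on the affected paths running only in process context with interrupts enabled, so there is no caller interrupt state to save and restore. A minimal illustrative sketch of the two idioms (standalone, not part of this diff):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* stand-in for dev->event_lock */

	/* Process context, interrupts known to be enabled: plain _irq is enough. */
	static void example_process_context(void)
	{
		spin_lock_irq(&example_lock);
		/* ... touch state shared with the interrupt handler ... */
		spin_unlock_irq(&example_lock);
	}

	/* Caller context unknown (interrupts may already be off): save/restore. */
	static void example_any_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}
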
@@ -3444,14 +3524,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                                       !intel_crtc_has_pending_flip(crtc),
                                       60*HZ) == 0)) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               unsigned long flags;
 
-               spin_lock_irqsave(&dev->event_lock, flags);
+               spin_lock_irq(&dev->event_lock);
                if (intel_crtc->unpin_work) {
                        WARN_ONCE(1, "Removing stuck page flip\n");
                        page_flip_completed(intel_crtc);
                }
-               spin_unlock_irqrestore(&dev->event_lock, flags);
+               spin_unlock_irq(&dev->event_lock);
        }
 
        if (crtc->primary->fb) {
@@ -4010,10 +4089,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
-       assert_vblank_disabled(crtc);
-
-       drm_vblank_on(dev, pipe);
-
        intel_enable_primary_hw_plane(crtc->primary, crtc);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@ -4059,10 +4134,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
         * consider this a flip to a NULL plane.
         */
        intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
-
-       drm_vblank_off(dev, pipe);
-
-       assert_vblank_disabled(crtc);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4132,6 +4203,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (HAS_PCH_CPT(dev))
                cpt_verify_modeset(dev, intel_crtc->pipe);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 }
 
@@ -4239,6 +4313,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_opregion_notify_encoder(encoder, true);
        }
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        haswell_mode_set_planes_workaround(intel_crtc);
@@ -4274,6 +4351,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc_disable_planes(crtc);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);
 
@@ -4336,6 +4416,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc_disable_planes(crtc);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                intel_opregion_notify_encoder(encoder, false);
                encoder->disable(encoder);
@@ -4477,20 +4560,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
        return mask;
 }
 
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
-                                 bool enable)
-{
-       if (dev_priv->power_domains.init_power_on == enable)
-               return;
-
-       if (enable)
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-       else
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-       dev_priv->power_domains.init_power_on = enable;
-}
-
 static void modeset_update_crtc_power_domains(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4802,6 +4871,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 
        /* Underruns don't raise interrupts, so check manually. */
@@ -4859,6 +4931,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 
        /*
@@ -4922,9 +4997,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        intel_set_memory_cxsr(dev_priv, false);
        intel_crtc_disable_planes(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
-
        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
@@ -4933,6 +5005,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
         */
        intel_wait_for_vblank(dev, pipe);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               encoder->disable(encoder);
+
        intel_disable_pipe(intel_crtc);
 
        i9xx_pfit_disable(intel_crtc);
@@ -6401,8 +6479,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
-                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+       if (!intel_display_power_is_enabled(dev_priv,
+                                           POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -6988,7 +7066,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
        POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                val = 0;
 
                switch (intel_crtc->config.pipe_bpp) {
@@ -7411,8 +7489,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
-                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+       if (!intel_display_power_is_enabled(dev_priv,
+                                           POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7605,7 +7683,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
        uint32_t val;
-       unsigned long irqflags;
 
        val = I915_READ(LCPLL_CTL);
 
@@ -7625,10 +7702,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
         * to call special forcewake code that doesn't touch runtime PM and
         * doesn't enable the forcewake delayed work.
         */
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       spin_lock_irq(&dev_priv->uncore.lock);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 
        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
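
The open-coded forcewake get/put above bumps a reference count under the uncore spinlock so that only the 0-to-1 and 1-to-0 transitions touch the hardware; since hsw_restore_lcpll() runs in process context, the plain _irq lock variant now suffices here as well. A generic standalone sketch of that refcounting pattern, with hypothetical names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(fw_lock);	/* stand-in for uncore.lock */
	static unsigned int fw_count;

	static void hw_force_wake_get(void) { /* touch hardware */ }
	static void hw_force_wake_put(void) { /* touch hardware */ }

	/* Only the first getter and the last putter talk to the hardware. */
	static void example_forcewake_get(void)
	{
		spin_lock_irq(&fw_lock);
		if (fw_count++ == 0)
			hw_force_wake_get();
		spin_unlock_irq(&fw_lock);
	}

	static void example_forcewake_put(void)
	{
		spin_lock_irq(&fw_lock);
		if (--fw_count == 0)
			hw_force_wake_put();
		spin_unlock_irq(&fw_lock);
	}
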
@@ -7659,10 +7736,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
        }
 
        /* See the big comment above. */
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       spin_lock_irq(&dev_priv->uncore.lock);
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 /*
@@ -7791,7 +7868,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
-       if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+       if (INTEL_INFO(dev)->gen < 9 &&
+           (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;
 
                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7810,7 +7888,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        enum intel_display_power_domain pfit_domain;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                                         POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
@@ -7839,7 +7917,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                        pipe_config->cpu_transcoder = TRANSCODER_EDP;
        }
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                        POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
                return false;
 
@@ -7852,7 +7930,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        intel_get_pipe_timings(crtc, pipe_config);
 
        pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-       if (intel_display_power_enabled(dev_priv, pfit_domain))
+       if (intel_display_power_is_enabled(dev_priv, pfit_domain))
                ironlake_get_pfit_config(crtc, pipe_config);
 
        if (IS_HASWELL(dev))
@@ -8989,35 +9067,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-static void intel_increase_pllclock(struct drm_device *dev,
-                                   enum pipe pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int dpll_reg = DPLL(pipe);
-       int dpll;
-
-       if (!HAS_GMCH_DISPLAY(dev))
-               return;
-
-       if (!dev_priv->lvds_downclock_avail)
-               return;
-
-       dpll = I915_READ(dpll_reg);
-       if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-               DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-               assert_panel_unlocked(dev_priv, pipe);
-
-               dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-               I915_WRITE(dpll_reg, dpll);
-               intel_wait_for_vblank(dev, pipe);
-
-               dpll = I915_READ(dpll_reg);
-               if (dpll & DISPLAY_RATE_SELECT_FPA1)
-                       DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-       }
-}
-
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -9093,199 +9142,16 @@ out:
        intel_runtime_pm_put(dev_priv);
 }
 
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-                              unsigned frontbuffer_bits,
-                              struct intel_engine_cs *ring)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe pipe;
-
-       if (!i915.powersave)
-               return;
-
-       for_each_pipe(dev_priv, pipe) {
-               if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-                       continue;
-
-               intel_increase_pllclock(dev, pipe);
-               if (ring && intel_fbc_enabled(dev))
-                       ring->fbc_dirty = true;
-       }
-}
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            struct intel_engine_cs *ring)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
-
-       if (ring) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               dev_priv->fb_tracking.busy_bits
-                       |= obj->frontbuffer_bits;
-               dev_priv->fb_tracking.flip_bits
-                       &= ~obj->frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
-       intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some oustanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flush(struct drm_device *dev,
-                            unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* Delay flushing when rings are still busy.*/
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
-       intel_edp_psr_flush(dev, frontbuffer_bits);
-
-       /*
-        * FIXME: Unconditional fbc flushing here is a rather gross hack and
-        * needs to be reworked into a proper frontbuffer tracking scheme like
-        * psr employs.
-        */
-       if (IS_BROADWELL(dev))
-               gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                       bool retire)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned frontbuffer_bits;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
-
-       frontbuffer_bits = obj->frontbuffer_bits;
-
-       if (retire) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               /* Filter out new bits since rendering started. */
-               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
-               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asnychronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
-                                   unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       dev_priv->fb_tracking.flip_bits
-               |= frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the fush if it hasn't been cancalled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
-                                    unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       /* Mask any cancelled flips. */
-       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct intel_unpin_work *work;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        work = intel_crtc->unpin_work;
        intel_crtc->unpin_work = NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        if (work) {
                cancel_work_sync(&work->work);
@@ -9331,6 +9197,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
        if (intel_crtc == NULL)
                return;
 
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so needs the full irqsave spinlocks.
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
 
@@ -9412,7 +9282,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
 
-       /* NB: An MMIO update of the plane base pointer will also
+
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so needs the full irqsave spinlocks.
+        *
+        * NB: An MMIO update of the plane base pointer will also
         * generate a page-flip completion irq, i.e. every modeset
         * is also accompanied by a spurious intel_prepare_page_flip().
         */
@@ -9787,7 +9662,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long irq_flags;
        int ret;
 
        if (WARN_ON(intel_crtc->mmio_flip.seqno))
@@ -9801,10 +9675,10 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
                return 0;
        }
 
-       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+       spin_lock_irq(&dev_priv->mmio_flip_lock);
        intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
        intel_crtc->mmio_flip.ring_id = obj->ring->id;
-       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+       spin_unlock_irq(&dev_priv->mmio_flip_lock);
 
        /*
         * Double check to catch cases where irq fired before
@@ -9869,18 +9743,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
+
+       WARN_ON(!in_irq());
 
        if (crtc == NULL)
                return;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock(&dev->event_lock);
        if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
                         intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
                page_flip_completed(intel_crtc);
        }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock(&dev->event_lock);
 }
 
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
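
intel_check_page_flip() above is only ever reached from the interrupt handler (hence the WARN_ON(!in_irq())), so local interrupts are already disabled on this CPU and a plain spin_lock()/spin_unlock() pair is sufficient; by contrast, do_intel_finish_page_flip() and intel_prepare_page_flip() keep the irqsave variants because they are called from both irq and process (reset) context. A minimal standalone sketch of the hardirq-side idiom, assuming a handler registered elsewhere:

	#include <linux/hardirq.h>
	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(flip_lock);	/* stand-in for dev->event_lock */

	/* Hardirq context: interrupts are already off, plain spin_lock() suffices. */
	static irqreturn_t example_irq_handler(int irq, void *arg)
	{
		WARN_ON(!in_irq());

		spin_lock(&flip_lock);
		/* ... complete or kick a pending flip ... */
		spin_unlock(&flip_lock);

		return IRQ_HANDLED;
	}
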
@@ -9896,7 +9771,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
-       unsigned long flags;
        int ret;
 
        //trigger software GT busyness calculation
@@ -9940,7 +9814,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto free_work;
 
        /* We borrow the event spin lock for protecting unpin_work */
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        if (intel_crtc->unpin_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
@@ -9950,7 +9824,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                        page_flip_completed(intel_crtc);
                } else {
                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       spin_unlock_irq(&dev->event_lock);
 
                        drm_crtc_vblank_put(crtc);
                        kfree(work);
@@ -9958,7 +9832,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                }
        }
        intel_crtc->unpin_work = work;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);
@@ -10045,9 +9919,9 @@ cleanup_pending:
        mutex_unlock(&dev->struct_mutex);
 
 cleanup:
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        intel_crtc->unpin_work = NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        drm_crtc_vblank_put(crtc);
 free_work:
@@ -10058,9 +9932,9 @@ out_hang:
                intel_crtc_wait_for_pending_flips(crtc);
                ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
                if (ret == 0 && event) {
-                       spin_lock_irqsave(&dev->event_lock, flags);
+                       spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       spin_unlock_irq(&dev->event_lock);
                }
        }
        return ret;
@@ -11646,7 +11520,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(PCH_DPLL(pll->id));
@@ -12276,6 +12150,9 @@ static bool intel_crt_present(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (INTEL_INFO(dev)->gen >= 9)
+               return false;
+
        if (IS_ULT(dev))
                return false;
 
@@ -12619,8 +12496,12 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
                dev_priv->display.off = ironlake_crtc_off;
-               dev_priv->display.update_primary_plane =
-                       ironlake_update_primary_plane;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       dev_priv->display.update_primary_plane =
+                               skylake_update_primary_plane;
+               else
+                       dev_priv->display.update_primary_plane =
+                               ironlake_update_primary_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_plane_config = ironlake_get_plane_config;
@@ -12704,6 +12585,10 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.modeset_global_resources =
                        valleyview_modeset_global_resources;
                dev_priv->display.write_eld = ironlake_write_eld;
+       } else if (INTEL_INFO(dev)->gen >= 9) {
+               dev_priv->display.write_eld = haswell_write_eld;
+               dev_priv->display.modeset_global_resources =
+                       haswell_modeset_global_resources;
        }
 
        /* Default just returns -ENODEV to indicate unsupported */
@@ -12931,11 +12816,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
        intel_enable_gt_powersave(dev);
 }
 
-void intel_modeset_suspend_hw(struct drm_device *dev)
-{
-       intel_suspend_hw(dev);
-}
-
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13271,7 +13151,7 @@ void i915_redisable_vga(struct drm_device *dev)
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
                return;
 
        i915_redisable_vga_power_on(dev);
@@ -13492,9 +13372,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
         * Too much stuff here (turning of rps, connectors, ...) would
         * experience fancy races otherwise.
         */
-       drm_irq_uninstall(dev);
-       intel_hpd_cancel_work(dev_priv);
-       dev_priv->pm._irqs_disabled = true;
+       intel_irq_uninstall(dev_priv);
 
        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13649,8 +13527,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
        for_each_pipe(dev_priv, i) {
                error->pipe[i].power_domain_on =
-                       intel_display_power_enabled_unlocked(dev_priv,
-                                                          POWER_DOMAIN_PIPE(i));
+                       __intel_display_power_is_enabled(dev_priv,
+                                                        POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;
 
@@ -13685,7 +13563,7 @@ intel_display_capture_error_state(struct drm_device *dev)
                enum transcoder cpu_transcoder = transcoders[i];
 
                error->transcoder[i].power_domain_on =
-                       intel_display_power_enabled_unlocked(dev_priv,
+                       __intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;
@@ -13769,9 +13647,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
 
        for_each_intel_crtc(dev, crtc) {
                struct intel_unpin_work *work;
-               unsigned long irqflags;
 
-               spin_lock_irqsave(&dev->event_lock, irqflags);
+               spin_lock_irq(&dev->event_lock);
 
                work = crtc->unpin_work;
 
@@ -13781,6 +13658,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
                        work->event = NULL;
                }
 
-               spin_unlock_irqrestore(&dev->event_lock, irqflags);
+               spin_unlock_irq(&dev->event_lock);
        }
 }