drm/i915: Rename edp vdd funcs for consistency
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8a1a4fbc06ac85c5c41b58750d44219777c37d6b..190e617f128ee865763425498d676d7af04eec27 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -111,10 +111,10 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 }
 
 static void intel_dp_link_down(struct intel_dp *intel_dp);
-static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
+static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
-static int
+int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -308,9 +308,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;
-               if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
-                       return pipe;
-               if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+               if (port_sel == PANEL_PORT_SELECT_VLV(port))
                        return pipe;
        }
 
@@ -535,7 +533,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;
 
-       vdd = _edp_panel_vdd_on(intel_dp);
+       vdd = edp_panel_vdd_on(intel_dp);
 
        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
@@ -773,11 +771,28 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
 
-       sysfs_remove_link(&intel_connector->base.kdev->kobj,
-                         intel_dp->aux.ddc.dev.kobj.name);
+       if (!intel_connector->mst_port)
+               sysfs_remove_link(&intel_connector->base.kdev->kobj,
+                                 intel_dp->aux.ddc.dev.kobj.name);
        intel_connector_unregister(intel_connector);
 }
 
+static void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+{
+       switch (link_bw) {
+       case DP_LINK_BW_1_62:
+               pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+               break;
+       case DP_LINK_BW_2_7:
+               pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+               break;
+       case DP_LINK_BW_5_4:
+               pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+               break;
+       }
+}
+
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_config *pipe_config, int link_bw)
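
The hsw_dp_set_ddi_pll_sel() helper added above maps DPCD link-bandwidth codes onto Haswell/Broadwell LCPLL selects. For orientation, the DP_LINK_BW_* codes encode the per-lane bit rate in units of 0.27 Gb/s, and each LCPLL select named above is numerically half that rate in MHz. A stand-alone sketch of that decode follows; the macros are re-defined locally so it builds on its own, and it is an illustration, not driver code:

#include <stdio.h>

#define DP_LINK_BW_1_62 0x06
#define DP_LINK_BW_2_7  0x0a
#define DP_LINK_BW_5_4  0x14

static int link_rate_mbps(unsigned char link_bw)
{
        return link_bw * 270;   /* the DPCD encodes the rate in units of 0.27 Gb/s */
}

int main(void)
{
        const unsigned char codes[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
        size_t i;

        for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
                printf("link_bw 0x%02x -> %d Mb/s per lane -> LCPLL %d MHz\n",
                       codes[i], link_rate_mbps(codes[i]),
                       link_rate_mbps(codes[i]) / 2);
        return 0;
}
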
@@ -789,8 +804,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
        if (IS_G4X(dev)) {
                divisor = gen4_dpll;
                count = ARRAY_SIZE(gen4_dpll);
-       } else if (IS_HASWELL(dev)) {
-               /* Haswell has special-purpose DP DDI clocks. */
        } else if (HAS_PCH_SPLIT(dev)) {
                divisor = pch_dpll;
                count = ARRAY_SIZE(pch_dpll);
@@ -813,20 +826,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
        }
 }
 
-static void
-intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum transcoder transcoder = crtc->config.cpu_transcoder;
-
-       I915_WRITE(PIPE_DATA_M2(transcoder),
-               TU_SIZE(m_n->tu) | m_n->gmch_m);
-       I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
-       I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
-       I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
-}
-
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
@@ -852,6 +851,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                pipe_config->has_pch_encoder = true;
 
        pipe_config->has_dp_encoder = true;
+       pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio;
 
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
@@ -955,13 +955,17 @@ found:
 
        if (intel_connector->panel.downclock_mode != NULL &&
                intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+                       pipe_config->has_drrs = true;
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
                                &pipe_config->dp_m2_n2);
        }
 
-       intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+       else
+               intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
        return true;
 }
@@ -1161,7 +1165,7 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
        return control;
 }
 
-static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
+static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -1212,7 +1216,7 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
        if (is_edp(intel_dp)) {
-               bool vdd = _edp_panel_vdd_on(intel_dp);
+               bool vdd = edp_panel_vdd_on(intel_dp);
 
                WARN(!vdd, "eDP VDD already requested on\n");
        }
@@ -1267,6 +1271,19 @@ static void edp_panel_vdd_work(struct work_struct *__work)
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
 
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+       unsigned long delay;
+
+       /*
+        * Queue the timer to fire a long time from now (relative to the power
+        * down delay) to keep the panel power up across a sequence of
+        * operations.
+        */
+       delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
+       schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
+}
+
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
        if (!is_edp(intel_dp))
@@ -1276,17 +1293,15 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 
        intel_dp->want_panel_vdd = false;
 
-       if (sync) {
+       if (sync)
                edp_panel_vdd_off_sync(intel_dp);
-       } else {
-               /*
-                * Queue the timer to fire a long
-                * time from now (relative to the power down delay)
-                * to keep the panel power up across a sequence of operations
-                */
-               schedule_delayed_work(&intel_dp->panel_vdd_work,
-                                     msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-       }
+       else
+               edp_panel_vdd_schedule_off(intel_dp);
+}
+
+static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+       edp_panel_vdd_off(intel_dp, sync);
 }
 
 void intel_edp_panel_on(struct intel_dp *intel_dp)
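
The hunk above reworks the eDP VDD helpers around a request/release pattern: edp_panel_vdd_on() reports whether the call actually had to assert VDD, and edp_panel_vdd_off() either drops it synchronously or, via edp_panel_vdd_schedule_off(), defers the drop well past the panel power cycle delay so a burst of AUX operations does not cycle panel power. The sketch below models that pattern in plain userspace C; all names (fake_panel, scheduled_off_ms, ...) are hypothetical stand-ins for the driver state, and the delayed work is reduced to a recorded deadline:

/* Sketch of the VDD request/release pattern above; userspace C, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_panel {
        bool vdd_asserted;              /* models edp_have_panel_vdd() */
        bool want_vdd;                  /* models intel_dp->want_panel_vdd */
        int power_cycle_delay_ms;
        int scheduled_off_ms;           /* stands in for the delayed vdd-off work */
};

/* Returns true only if this call actually asserted VDD, so the caller
 * knows it has to balance it with a vdd_off later. */
static bool panel_vdd_on(struct fake_panel *p)
{
        bool newly_asserted = !p->vdd_asserted;

        p->want_vdd = true;
        p->vdd_asserted = true;
        return newly_asserted;
}

static void panel_vdd_off(struct fake_panel *p, bool sync)
{
        p->want_vdd = false;
        if (sync) {
                p->vdd_asserted = false;        /* drop VDD immediately */
                p->scheduled_off_ms = 0;
        } else {
                /* Defer the drop far beyond the power cycle delay so a burst
                 * of AUX operations keeps reusing the same VDD assertion. */
                p->scheduled_off_ms = p->power_cycle_delay_ms * 5;
        }
}

int main(void)
{
        struct fake_panel p = { .power_cycle_delay_ms = 500 };
        bool vdd = panel_vdd_on(&p);

        printf("asserted by this call: %d\n", vdd);
        panel_vdd_off(&p, false);
        printf("VDD still up, drop scheduled in %d ms\n", p.scheduled_off_ms);
        return 0;
}
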
@@ -1349,8 +1364,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Turn eDP power off\n");
 
-       edp_wait_backlight_off(intel_dp);
-
        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
        pp = ironlake_get_pp_control(intel_dp);
@@ -1374,7 +1387,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
        intel_display_power_put(dev_priv, power_domain);
 }
 
-void intel_edp_backlight_on(struct intel_dp *intel_dp)
+/* Enable backlight in the panel power control. */
+static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1382,10 +1396,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
        u32 pp;
        u32 pp_ctrl_reg;
 
-       if (!is_edp(intel_dp))
-               return;
-
-       DRM_DEBUG_KMS("\n");
        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
@@ -1400,23 +1410,28 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
+}
+
+/* Enable backlight PWM and backlight PP control. */
+void intel_edp_backlight_on(struct intel_dp *intel_dp)
+{
+       if (!is_edp(intel_dp))
+               return;
+
+       DRM_DEBUG_KMS("\n");
 
        intel_panel_enable_backlight(intel_dp->attached_connector);
+       _intel_edp_backlight_on(intel_dp);
 }
 
-void intel_edp_backlight_off(struct intel_dp *intel_dp)
+/* Disable backlight in the panel power control. */
+static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;
 
-       if (!is_edp(intel_dp))
-               return;
-
-       intel_panel_disable_backlight(intel_dp->attached_connector);
-
-       DRM_DEBUG_KMS("\n");
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;
 
@@ -1425,6 +1440,41 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        intel_dp->last_backlight_off = jiffies;
+
+       edp_wait_backlight_off(intel_dp);
+}
+
+/* Disable backlight PP control and backlight PWM. */
+void intel_edp_backlight_off(struct intel_dp *intel_dp)
+{
+       if (!is_edp(intel_dp))
+               return;
+
+       DRM_DEBUG_KMS("\n");
+
+       _intel_edp_backlight_off(intel_dp);
+       intel_panel_disable_backlight(intel_dp->attached_connector);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+static void intel_edp_backlight_power(struct intel_connector *connector,
+                                     bool enable)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+       bool is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+
+       if (is_enabled == enable)
+               return;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (enable)
+               _intel_edp_backlight_on(intel_dp);
+       else
+               _intel_edp_backlight_off(intel_dp);
 }
 
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
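
intel_edp_backlight_power() above is written to be idempotent: it reads the current EDP_BLC_ENABLE state out of the panel power control register and only calls the low-level on/off helpers when the requested state actually differs, since the bl_power sysfs attribute can be written repeatedly. A minimal stand-alone model of that guard follows; the register is just a variable here and the bit value is a stand-in, not the real register layout:

/* Idempotent enable/disable guarded by current hardware state; a sketch only. */
#include <stdbool.h>
#include <stdio.h>

#define EDP_BLC_ENABLE (1u << 2)        /* stand-in bit, not the real layout */

static unsigned int pp_control;         /* stands in for the PP control register */

static void backlight_on(void)  { pp_control |= EDP_BLC_ENABLE;  puts("BLC on");  }
static void backlight_off(void) { pp_control &= ~EDP_BLC_ENABLE; puts("BLC off"); }

static void backlight_power(bool enable)
{
        bool is_enabled = pp_control & EDP_BLC_ENABLE;

        if (is_enabled == enable)       /* repeated sysfs writes become no-ops */
                return;

        if (enable)
                backlight_on();
        else
                backlight_off();
}

int main(void)
{
        backlight_power(true);
        backlight_power(true);          /* ignored: already enabled */
        backlight_power(false);
        return 0;
}
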
@@ -1551,7 +1601,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                        return true;
                }
 
-               for_each_pipe(i) {
+               for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
@@ -1646,11 +1696,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 }
 
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       return dev_priv->psr.sink_support;
+       return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
 }
 
 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1698,9 +1746,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_vsc_psr psr_vsc;
 
-       if (intel_dp->psr_setup_done)
-               return;
-
        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
@@ -1712,22 +1757,25 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
        /* Avoid continuous PSR exit by masking memup and hpd */
        I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
                   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
-       intel_dp->psr_setup_done = true;
 }
 
 static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        int precharge = 0x3;
        int msg_size = 5;       /* Header(4) + Message(1) */
+       bool only_standby = false;
 
        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
+       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+               only_standby = true;
+
        /* Enable PSR in sink */
-       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
        else
@@ -1746,18 +1794,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
 
 static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t max_sleep_time = 0x1f;
        uint32_t idle_frames = 1;
        uint32_t val = 0x0;
        const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+       bool only_standby = false;
+
+       if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+               only_standby = true;
 
-       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
                val |= EDP_PSR_LINK_STANDBY;
                val |= EDP_PSR_TP2_TP3_TIME_0us;
                val |= EDP_PSR_TP1_TIME_0us;
                val |= EDP_PSR_SKIP_AUX_EXIT;
+               val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
        } else
                val |= EDP_PSR_LINK_DISABLE;
 
@@ -1775,18 +1829,14 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
-       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
-       dev_priv->psr.source_ok = false;
+       lockdep_assert_held(&dev_priv->psr.lock);
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
-       if (!HAS_PSR(dev)) {
-               DRM_DEBUG_KMS("PSR not supported on this platform\n");
-               return false;
-       }
+       dev_priv->psr.source_ok = false;
 
-       if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
-           (dig_port->port != PORT_A)) {
+       if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
                DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
                return false;
        }
@@ -1796,29 +1846,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
-       crtc = dig_port->base.base.crtc;
-       if (crtc == NULL) {
-               DRM_DEBUG_KMS("crtc not active for PSR\n");
-               return false;
-       }
-
-       intel_crtc = to_intel_crtc(crtc);
-       if (!intel_crtc_active(crtc)) {
-               DRM_DEBUG_KMS("crtc not active for PSR\n");
-               return false;
-       }
-
-       obj = to_intel_framebuffer(crtc->primary->fb)->obj;
-       if (obj->tiling_mode != I915_TILING_X ||
-           obj->fence_reg == I915_FENCE_REG_NONE) {
-               DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-               return false;
-       }
-
-       if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
-               DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-               return false;
-       }
+       /* Below limitations aren't valid for Broadwell */
+       if (IS_BROADWELL(dev))
+               goto out;
 
        if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
            S3D_ENABLE) {
@@ -1831,35 +1861,60 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
+ out:
        dev_priv->psr.source_ok = true;
        return true;
 }
 
 static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
-       if (!intel_edp_psr_match_conditions(intel_dp) ||
-           intel_edp_is_psr_enabled(dev))
-               return;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* Setup PSR once */
-       intel_edp_psr_setup(intel_dp);
+       WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+       WARN_ON(dev_priv->psr.active);
+       lockdep_assert_held(&dev_priv->psr.lock);
 
        /* Enable PSR on the panel */
        intel_edp_psr_enable_sink(intel_dp);
 
        /* Enable PSR on the host */
        intel_edp_psr_enable_source(intel_dp);
+
+       dev_priv->psr.active = true;
 }
 
 void intel_edp_psr_enable(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!HAS_PSR(dev)) {
+               DRM_DEBUG_KMS("PSR not supported on this platform\n");
+               return;
+       }
+
+       if (!is_edp_psr(intel_dp)) {
+               DRM_DEBUG_KMS("PSR not supported by this panel\n");
+               return;
+       }
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (dev_priv->psr.enabled) {
+               DRM_DEBUG_KMS("PSR already in use\n");
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
 
-       if (intel_edp_psr_match_conditions(intel_dp) &&
-           !intel_edp_is_psr_enabled(dev))
-               intel_edp_psr_do_enable(intel_dp);
+       dev_priv->psr.busy_frontbuffer_bits = 0;
+
+       /* Setup PSR once */
+       intel_edp_psr_setup(intel_dp);
+
+       if (intel_edp_psr_match_conditions(intel_dp))
+               dev_priv->psr.enabled = intel_dp;
+       mutex_unlock(&dev_priv->psr.lock);
 }
 
 void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1867,36 +1922,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!intel_edp_is_psr_enabled(dev))
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
                return;
+       }
+
+       if (dev_priv->psr.active) {
+               I915_WRITE(EDP_PSR_CTL(dev),
+                          I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+               /* Wait till PSR is idle */
+               if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+                              EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+                       DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+               dev_priv->psr.active = false;
+       } else {
+               WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+       }
 
-       I915_WRITE(EDP_PSR_CTL(dev),
-                  I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+       dev_priv->psr.enabled = NULL;
+       mutex_unlock(&dev_priv->psr.lock);
 
-       /* Wait till PSR is idle */
-       if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
-                      EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
-               DRM_ERROR("Timed out waiting for PSR Idle State\n");
+       cancel_delayed_work_sync(&dev_priv->psr.work);
 }
 
-void intel_edp_psr_update(struct drm_device *dev)
+static void intel_edp_psr_work(struct work_struct *work)
 {
-       struct intel_encoder *encoder;
-       struct intel_dp *intel_dp = NULL;
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), psr.work.work);
+       struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
-               if (encoder->type == INTEL_OUTPUT_EDP) {
-                       intel_dp = enc_to_intel_dp(&encoder->base);
+       mutex_lock(&dev_priv->psr.lock);
+       intel_dp = dev_priv->psr.enabled;
 
-                       if (!is_edp_psr(dev))
-                               return;
+       if (!intel_dp)
+               goto unlock;
 
-                       if (!intel_edp_psr_match_conditions(intel_dp))
-                               intel_edp_psr_disable(intel_dp);
-                       else
-                               if (!intel_edp_is_psr_enabled(dev))
-                                       intel_edp_psr_do_enable(intel_dp);
-               }
+       /*
+        * The delayed work can race with an invalidate hence we need to
+        * recheck. Since psr_flush first clears this and then reschedules we
+        * won't ever miss a flush when bailing out here.
+        */
+       if (dev_priv->psr.busy_frontbuffer_bits)
+               goto unlock;
+
+       intel_edp_psr_do_enable(intel_dp);
+unlock:
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_edp_psr_do_exit(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->psr.active) {
+               u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+               WARN_ON(!(val & EDP_PSR_ENABLE));
+
+               I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+               dev_priv->psr.active = false;
+       }
+
+}
+
+void intel_edp_psr_invalidate(struct drm_device *dev,
+                             unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       enum pipe pipe;
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
+       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+       pipe = to_intel_crtc(crtc)->pipe;
+
+       intel_edp_psr_do_exit(dev);
+
+       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+       dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+                        unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       enum pipe pipe;
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
+       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+       pipe = to_intel_crtc(crtc)->pipe;
+       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+       /*
+        * On Haswell sprite plane updates don't result in a psr invalidating
+        * signal in the hardware. Which means we need to manually fake this in
+        * software for all flushes, not just when we've seen a preceding
+        * invalidation through frontbuffer rendering.
+        */
+       if (IS_HASWELL(dev) &&
+           (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+               intel_edp_psr_do_exit(dev);
+
+       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+               schedule_delayed_work(&dev_priv->psr.work,
+                                     msecs_to_jiffies(100));
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+       mutex_init(&dev_priv->psr.lock);
 }
 
 static void intel_disable_dp(struct intel_encoder *encoder)
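
The new PSR code above is a small frontbuffer-tracking state machine: intel_edp_psr_invalidate() exits PSR right away and marks the affected frontbuffer bits busy, intel_edp_psr_flush() clears them and only schedules the delayed re-enable once nothing is busy, and the delayed work re-checks busy_frontbuffer_bits under psr.lock to close the race noted in the comment. The following stand-alone sketch models just that bookkeeping; the names are hypothetical and a boolean stands in for schedule_delayed_work():

/* Frontbuffer busy-bit tracking in the style of psr_invalidate/psr_flush above. */
#include <stdbool.h>
#include <stdio.h>

struct fake_psr {
        bool enabled;                   /* PSR configured for some pipe */
        bool active;                    /* PSR currently engaged in "hardware" */
        unsigned int busy_frontbuffer_bits;
        bool reenable_scheduled;        /* stands in for schedule_delayed_work() */
};

static void psr_exit(struct fake_psr *psr)
{
        if (psr->active) {
                psr->active = false;
                puts("PSR exit");
        }
}

static void psr_invalidate(struct fake_psr *psr, unsigned int fb_bits)
{
        if (!psr->enabled)
                return;
        psr_exit(psr);                  /* rendering is about to touch the frontbuffer */
        psr->busy_frontbuffer_bits |= fb_bits;
}

static void psr_flush(struct fake_psr *psr, unsigned int fb_bits)
{
        if (!psr->enabled)
                return;
        psr->busy_frontbuffer_bits &= ~fb_bits;
        /* Only re-arm once nothing is busy and PSR is not already active. */
        if (!psr->active && !psr->busy_frontbuffer_bits)
                psr->reenable_scheduled = true;
}

int main(void)
{
        struct fake_psr psr = { .enabled = true, .active = true };

        psr_invalidate(&psr, 0x1);      /* CPU rendering starts */
        psr_flush(&psr, 0x1);           /* rendering done, re-enable later */
        printf("re-enable scheduled: %d\n", psr.reenable_scheduled);
        return 0;
}
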
@@ -1986,7 +2141,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_edp_panel_on(intel_dp);
-       edp_panel_vdd_off(intel_dp, true);
+       intel_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
 }
@@ -2152,6 +2307,72 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
        vlv_wait_port_ready(dev_priv, dport);
 }
 
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 val;
+
+       intel_dp_prepare(encoder);
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       /* program left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA1_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA1_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA2_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA2_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       /* program clock channel usage */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+       /*
+        * This is a bit weird since generally CL
+        * matches the pipe, but here we need to
+        * pick the CL based on the port.
+        */
+       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+       if (pipe != PIPE_B)
+               val &= ~CHV_CMN_USEDCLKCHANNEL;
+       else
+               val |= CHV_CMN_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+       mutex_unlock(&dev_priv->dpio_lock);
+}
+
 /*
  * Native read with retry for link status and receiver capability reads for
  * cases where the sink may still be asleep.
@@ -2189,18 +2410,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
                                       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
-/*
- * These are source-specific values; current Intel hardware supports
- * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
- */
-
+/* These are source-specific values. */
 static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+       if (IS_VALLEYVIEW(dev))
                return DP_TRAIN_VOLTAGE_SWING_1200;
        else if (IS_GEN7(dev) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2216,18 +2433,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_BROADWELL(dev)) {
-               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_400:
-               case DP_TRAIN_VOLTAGE_SWING_600:
-                       return DP_TRAIN_PRE_EMPHASIS_6;
-               case DP_TRAIN_VOLTAGE_SWING_800:
-                       return DP_TRAIN_PRE_EMPHASIS_3_5;
-               case DP_TRAIN_VOLTAGE_SWING_1200:
-               default:
-                       return DP_TRAIN_PRE_EMPHASIS_0;
-               }
-       } else if (IS_HASWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2480,8 +2686,8 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
        /* Program swing margin */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-               val &= ~DPIO_SWING_MARGIN_MASK;
-               val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
+               val &= ~DPIO_SWING_MARGIN000_MASK;
+               val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
        }
 
@@ -2699,41 +2905,6 @@ intel_hsw_signal_levels(uint8_t train_set)
        }
 }
 
-static uint32_t
-intel_bdw_signal_levels(uint8_t train_set)
-{
-       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-                                        DP_TRAIN_PRE_EMPHASIS_MASK);
-       switch (signal_levels) {
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_400MV_0DB_BDW;       /* Sel0 */
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_400MV_3_5DB_BDW;     /* Sel1 */
-       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-               return DDI_BUF_EMP_400MV_6DB_BDW;       /* Sel2 */
-
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_600MV_0DB_BDW;       /* Sel3 */
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_600MV_3_5DB_BDW;     /* Sel4 */
-       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
-               return DDI_BUF_EMP_600MV_6DB_BDW;       /* Sel5 */
-
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_800MV_0DB_BDW;       /* Sel6 */
-       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
-               return DDI_BUF_EMP_800MV_3_5DB_BDW;     /* Sel7 */
-
-       case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
-               return DDI_BUF_EMP_1200MV_0DB_BDW;      /* Sel8 */
-
-       default:
-               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
-                             "0x%x\n", signal_levels);
-               return DDI_BUF_EMP_400MV_0DB_BDW;       /* Sel0 */
-       }
-}
-
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2744,10 +2915,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];
 
-       if (IS_BROADWELL(dev)) {
-               signal_levels = intel_bdw_signal_levels(train_set);
-               mask = DDI_BUF_EMP_MASK;
-       } else if (IS_HASWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
@@ -2830,7 +2998,10 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                }
 
        } else {
-               *DP &= ~DP_LINK_TRAIN_MASK;
+               if (IS_CHERRYVIEW(dev))
+                       *DP &= ~DP_LINK_TRAIN_MASK_CHV;
+               else
+                       *DP &= ~DP_LINK_TRAIN_MASK;
 
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
@@ -2843,8 +3014,12 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
-                       DRM_ERROR("DP training pattern 3 not supported\n");
-                       *DP |= DP_LINK_TRAIN_PAT_2;
+                       if (IS_CHERRYVIEW(dev)) {
+                               *DP |= DP_LINK_TRAIN_PAT_3_CHV;
+                       } else {
+                               DRM_ERROR("DP training pattern 3 not supported\n");
+                               *DP |= DP_LINK_TRAIN_PAT_2;
+                       }
                        break;
                }
        }
@@ -3131,7 +3306,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
-               DP &= ~DP_LINK_TRAIN_MASK;
+               if (IS_CHERRYVIEW(dev))
+                       DP &= ~DP_LINK_TRAIN_MASK_CHV;
+               else
+                       DP &= ~DP_LINK_TRAIN_MASK;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
        }
        POSTING_READ(intel_dp->output_reg);
@@ -3243,7 +3421,34 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 
-       edp_panel_vdd_off(intel_dp, false);
+       intel_edp_panel_vdd_off(intel_dp, false);
+}
+
+static bool
+intel_dp_probe_mst(struct intel_dp *intel_dp)
+{
+       u8 buf[1];
+
+       if (!intel_dp->can_mst)
+               return false;
+
+       if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
+               return false;
+
+       intel_edp_panel_vdd_on(intel_dp);
+       if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+               if (buf[0] & DP_MST_CAP) {
+                       DRM_DEBUG_KMS("Sink is MST capable\n");
+                       intel_dp->is_mst = true;
+               } else {
+                       DRM_DEBUG_KMS("Sink is not MST capable\n");
+                       intel_dp->is_mst = false;
+               }
+       }
+       intel_edp_panel_vdd_off(intel_dp, false);
+
+       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+       return intel_dp->is_mst;
 }
 
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
@@ -3283,6 +3488,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
                                       sink_irq_vector, 1) == 1;
 }
 
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+       int ret;
+
+       ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+                                            DP_SINK_COUNT_ESI,
+                                            sink_irq_vector, 14);
+       if (ret != 14)
+               return false;
+
+       return true;
+}
+
 static void
 intel_dp_handle_test_request(struct intel_dp *intel_dp)
 {
@@ -3290,6 +3509,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
 }
 
+static int
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+       bool bret;
+
+       if (intel_dp->is_mst) {
+               u8 esi[16] = { 0 };
+               int ret = 0;
+               int retry;
+               bool handled;
+               bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+go_again:
+               if (bret == true) {
+
+                       /* check link status - esi[10] = 0x200c */
+                       if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+                               DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+                               intel_dp_start_link_train(intel_dp);
+                               intel_dp_complete_link_train(intel_dp);
+                               intel_dp_stop_link_train(intel_dp);
+                       }
+
+                       DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+                       ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+
+                       if (handled) {
+                               for (retry = 0; retry < 3; retry++) {
+                                       int wret;
+                                       wret = drm_dp_dpcd_write(&intel_dp->aux,
+                                                                DP_SINK_COUNT_ESI+1,
+                                                                &esi[1], 3);
+                                       if (wret == 3) {
+                                               break;
+                                       }
+                               }
+
+                               bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+                               if (bret == true) {
+                                       DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+                                       goto go_again;
+                               }
+                       } else
+                               ret = 0;
+
+                       return ret;
+               } else {
+                       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+                       DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+                       intel_dp->is_mst = false;
+                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+                       /* send a hotplug event */
+                       drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+               }
+       }
+       return -EINVAL;
+}
+
 /*
  * According to DP spec
  * 5.1.2:
@@ -3298,21 +3574,25 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
  *  4. Check link status on receipt of hot-plug interrupt
  */
-
 void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
        u8 link_status[DP_LINK_STATUS_SIZE];
 
-       /* FIXME: This access isn't protected by any locks. */
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
        if (!intel_encoder->connectors_active)
                return;
 
        if (WARN_ON(!intel_encoder->base.crtc))
                return;
 
+       if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+               return;
+
        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                return;
@@ -3518,8 +3798,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
        struct edid *edid = NULL;
-
-       intel_runtime_pm_get(dev_priv);
+       bool ret;
 
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);
@@ -3527,6 +3806,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
+       if (intel_dp->is_mst) {
+               /* MST devices are disconnected from a monitor POV */
+               if (intel_encoder->type != INTEL_OUTPUT_EDP)
+                       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               status = connector_status_disconnected;
+               goto out;
+       }
+
        intel_dp->has_audio = false;
 
        if (HAS_PCH_SPLIT(dev))
@@ -3539,6 +3826,16 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
        intel_dp_probe_oui(intel_dp);
 
+       ret = intel_dp_probe_mst(intel_dp);
+       if (ret) {
+               /* if we are in MST mode then this connector
+                  won't appear connected or have anything with EDID on it */
+               if (intel_encoder->type != INTEL_OUTPUT_EDP)
+                       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               status = connector_status_disconnected;
+               goto out;
+       }
+
        if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
                intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
        } else {
@@ -3555,9 +3852,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
 out:
        intel_display_power_put(dev_priv, power_domain);
-
-       intel_runtime_pm_put(dev_priv);
-
        return status;
 }
 
@@ -3734,6 +4028,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
        drm_dp_aux_unregister(&intel_dp->aux);
+       intel_dp_mst_encoder_cleanup(intel_dig_port);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -3748,6 +4043,21 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+       if (!is_edp(intel_dp))
+               return;
+
+       edp_panel_vdd_off_sync(intel_dp);
+}
+
+static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+       intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+}
+
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
@@ -3763,15 +4073,78 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
 };
 
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+       .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
 };
 
-static void
+void
 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+       return;
+}
+
+bool
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
+       bool ret = true;
 
-       intel_dp_check_link_status(intel_dp);
+       if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+               intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+
+       DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
+                     port_name(intel_dig_port->port),
+                     long_hpd ? "long" : "short");
+
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
+       if (long_hpd) {
+               if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+                       goto mst_fail;
+
+               if (!intel_dp_get_dpcd(intel_dp)) {
+                       goto mst_fail;
+               }
+
+               intel_dp_probe_oui(intel_dp);
+
+               if (!intel_dp_probe_mst(intel_dp))
+                       goto mst_fail;
+
+       } else {
+               if (intel_dp->is_mst) {
+                       if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
+                               goto mst_fail;
+               }
+
+               if (!intel_dp->is_mst) {
+                       /*
+                        * we'll check the link status via the normal hot plug path later -
+                        * but for short hpds we should check it now
+                        */
+                       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+                       intel_dp_check_link_status(intel_dp);
+                       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+               }
+       }
+       ret = false;
+       goto put_power;
+mst_fail:
+       /* if we were in MST mode, and device is not there get out of MST mode */
+       if (intel_dp->is_mst) {
+               DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+               intel_dp->is_mst = false;
+               drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+       }
+put_power:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
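
intel_dp_hpd_pulse() above splits hotplug handling by pulse length: a long pulse triggers a full re-detect (port presence, DPCD read, MST probe), a short pulse only services interrupts (the MST ESI vector, or an immediate SST link-status check), and the mst_fail path drops out of MST mode so the normal single-stream hotplug path can take over. A compact stand-alone sketch of that control flow, with hypothetical names and a few booleans standing in for the sink:

/* Long vs. short HPD handling in the style of intel_dp_hpd_pulse(); sketch only. */
#include <stdbool.h>
#include <stdio.h>

struct fake_sink {
        bool present;           /* models ibx_digital_port_connected() */
        bool dpcd_ok;           /* models intel_dp_get_dpcd() */
        bool mst_capable;       /* models intel_dp_probe_mst() */
        bool is_mst;
};

/* Returns true when the normal SST hotplug path should take over. */
static bool handle_hpd(struct fake_sink *s, bool long_hpd)
{
        if (long_hpd) {
                /* Full re-detect: presence, DPCD, then MST capability. */
                if (!s->present || !s->dpcd_ok)
                        goto mst_fail;
                if (!s->mst_capable)
                        goto mst_fail;          /* plain SST sink: fall back below */
                s->is_mst = true;
                puts("long pulse: MST sink detected");
                return false;
        }

        /* Short pulse: service interrupts on the existing configuration. */
        if (s->is_mst)
                puts("short pulse: handle MST ESI vector");
        else
                puts("short pulse: check SST link status now");
        return false;

mst_fail:
        if (s->is_mst) {
                puts("MST sink disappeared: leaving MST mode");
                s->is_mst = false;
        }
        return true;
}

int main(void)
{
        struct fake_sink s = { .present = true, .dpcd_ok = true, .mst_capable = true };

        handle_hpd(&s, true);   /* long pulse: detect */
        handle_hpd(&s, false);  /* short pulse: service IRQ */
        s.present = false;
        handle_hpd(&s, true);   /* long pulse with the sink gone */
        return 0;
}
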
@@ -3822,7 +4195,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
        return false;
 }
 
-static void
+void
 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -3957,6 +4330,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
+       enum port port = dp_to_dig_port(intel_dp)->port;
 
        if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
@@ -3991,12 +4365,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
-               if (dp_to_dig_port(intel_dp)->port == PORT_B)
-                       port_sel = PANEL_PORT_SELECT_DPB_VLV;
-               else
-                       port_sel = PANEL_PORT_SELECT_DPC_VLV;
+               port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-               if (dp_to_dig_port(intel_dp)->port == PORT_A)
+               if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
@@ -4035,6 +4406,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                return;
        }
 
+       /*
+        * FIXME: This needs proper synchronization with psr state. But it is
+        * really hard to tell without seeing the users of this code. Check
+        * locking and ordering once that lands.
+        */
        if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
                DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
                return;
@@ -4075,7 +4451,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                val = I915_READ(reg);
                if (index > DRRS_HIGH_RR) {
                        val |= PIPECONF_EDP_RR_MODE_SWITCH;
-                       intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
+                       intel_dp_set_m_n(intel_crtc);
                } else {
                        val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
                }
@@ -4115,7 +4491,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
        }
 
        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
-               DRM_INFO("VBT doesn't support DRRS\n");
+               DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
                return NULL;
        }
 
@@ -4123,7 +4499,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
                                        (dev, fixed_mode, connector);
 
        if (!downclock_mode) {
-               DRM_INFO("DRRS not supported\n");
+               DRM_DEBUG_KMS("DRRS not supported\n");
                return NULL;
        }
 
@@ -4134,10 +4510,36 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
        intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
 
        intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
-       DRM_INFO("seamless DRRS supported for eDP panel.\n");
+       DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
        return downclock_mode;
 }
 
+void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
+{
+       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_dp *intel_dp;
+       enum intel_display_power_domain power_domain;
+
+       if (intel_encoder->type != INTEL_OUTPUT_EDP)
+               return;
+
+       intel_dp = enc_to_intel_dp(&intel_encoder->base);
+       if (!edp_have_panel_vdd(intel_dp))
+               return;
+       /*
+        * The VDD bit needs a power domain reference, so if the bit is
+        * already enabled when we boot or resume, grab this reference and
+        * schedule a vdd off, so we don't hold on to the reference
+        * indefinitely.
+        */
+       DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
+       edp_panel_vdd_schedule_off(intel_dp);
+}
+
 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector,
                                     struct edp_power_seq *power_seq)
@@ -4158,18 +4560,12 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!is_edp(intel_dp))
                return true;
 
-       /* The VDD bit needs a power domain reference, so if the bit is already
-        * enabled when we boot, grab this reference. */
-       if (edp_have_panel_vdd(intel_dp)) {
-               enum intel_display_power_domain power_domain;
-               power_domain = intel_display_port_power_domain(intel_encoder);
-               intel_display_power_get(dev_priv, power_domain);
-       }
+       intel_edp_panel_vdd_sanitize(intel_encoder);
 
        /* Cache DPCD and EDID for edp. */
        intel_edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
-       edp_panel_vdd_off(intel_dp, false);
+       intel_edp_panel_vdd_off(intel_dp, false);
 
        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -4227,6 +4623,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        }
 
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+       intel_connector->panel.backlight_power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector);
 
        return true;
@@ -4288,7 +4685,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                          edp_panel_vdd_work);
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
-       drm_sysfs_connector_add(connector);
+       drm_connector_register(connector);
 
        if (HAS_DDI(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -4321,7 +4718,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
        intel_dp_aux_init(intel_dp, intel_connector);
 
-       intel_dp->psr_setup_done = false;
+       /* init MST on ports that can support it */
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+               if (port == PORT_B || port == PORT_C || port == PORT_D) {
+                       intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
+               }
+       }
 
        if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
                drm_dp_aux_unregister(&intel_dp->aux);
@@ -4331,7 +4733,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        edp_panel_vdd_off_sync(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
-               drm_sysfs_connector_remove(connector);
+               drm_connector_unregister(connector);
                drm_connector_cleanup(connector);
                return false;
        }
@@ -4353,6 +4755,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 void
 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
@@ -4378,7 +4781,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
+       intel_encoder->suspend = intel_dp_encoder_suspend;
        if (IS_CHERRYVIEW(dev)) {
+               intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
@@ -4408,9 +4813,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
        intel_encoder->cloneable = 0;
        intel_encoder->hot_plug = intel_dp_hot_plug;
 
+       intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+       dev_priv->hpd_irq_port[port] = intel_dig_port;
+
        if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
                drm_encoder_cleanup(encoder);
                kfree(intel_dig_port);
                kfree(intel_connector);
        }
 }
+
+void intel_dp_mst_suspend(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       /* disable MST */
+       for (i = 0; i < I915_MAX_PORTS; i++) {
+               struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+               if (!intel_dig_port)
+                       continue;
+
+               if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+                       if (!intel_dig_port->dp.can_mst)
+                               continue;
+                       if (intel_dig_port->dp.is_mst)
+                               drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+               }
+       }
+}
+
+void intel_dp_mst_resume(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < I915_MAX_PORTS; i++) {
+               struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+               if (!intel_dig_port)
+                       continue;
+               if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+                       int ret;
+
+                       if (!intel_dig_port->dp.can_mst)
+                               continue;
+
+                       ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+                       if (ret != 0) {
+                               intel_dp_check_mst_status(&intel_dig_port->dp);
+                       }
+               }
+       }
+}