2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll
[] = {
57 { .p1
= 2, .p2
= 10, .n
= 2, .m1
= 23, .m2
= 8 } },
59 { .p1
= 1, .p2
= 10, .n
= 1, .m1
= 14, .m2
= 2 } }
62 static const struct dp_link_dpll pch_dpll
[] = {
64 { .p1
= 2, .p2
= 10, .n
= 1, .m1
= 12, .m2
= 9 } },
66 { .p1
= 1, .p2
= 10, .n
= 2, .m1
= 14, .m2
= 8 } }
69 static const struct dp_link_dpll vlv_dpll
[] = {
71 { .p1
= 3, .p2
= 2, .n
= 5, .m1
= 3, .m2
= 81 } },
73 { .p1
= 2, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll
[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1
= 4, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1
= 4, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1
= 2, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } }
/* Link rates (kHz) each platform's DP source can drive. */
static const int bxt_rates[] = {
	162000, 216000, 243000, 270000, 324000, 432000, 540000,
};
static const int skl_rates[] = {
	162000, 216000, 270000, 324000, 432000, 540000,
};
/* DP 1.1/1.2 standard rates: RBR, HBR, HBR2. */
static const int default_rates[] = {
	162000, 270000, 540000,
};
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
107 static bool is_edp(struct intel_dp
*intel_dp
)
109 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
111 return intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
;
114 static struct drm_device
*intel_dp_to_dev(struct intel_dp
*intel_dp
)
116 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
118 return intel_dig_port
->base
.base
.dev
;
121 static struct intel_dp
*intel_attached_dp(struct drm_connector
*connector
)
123 return enc_to_intel_dp(&intel_attached_encoder(connector
)->base
);
126 static void intel_dp_link_down(struct intel_dp
*intel_dp
);
127 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
);
128 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
);
129 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
);
130 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
/*
 * Return the 4-bit mask of DP lanes that are NOT used by a
 * @lane_count-lane configuration (lanes 0..3).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used_lanes = (1 << lane_count) - 1;

	return ~used_lanes & 0xf;
}
139 intel_dp_max_link_bw(struct intel_dp
*intel_dp
)
141 int max_link_bw
= intel_dp
->dpcd
[DP_MAX_LINK_RATE
];
143 switch (max_link_bw
) {
144 case DP_LINK_BW_1_62
:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
151 max_link_bw
= DP_LINK_BW_1_62
;
157 static u8
intel_dp_max_lane_count(struct intel_dp
*intel_dp
)
159 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
160 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
161 u8 source_max
, sink_max
;
164 if (HAS_DDI(dev
) && intel_dig_port
->port
== PORT_A
&&
165 (intel_dig_port
->saved_port_bits
& DDI_A_4_LANES
) == 0)
168 sink_max
= drm_dp_max_lane_count(intel_dp
->dpcd
);
170 return min(source_max
, sink_max
);
174 * The units on the numbers in the next two are... bizarre. Examples will
175 * make it clearer; this one parallels an example in the eDP spec.
177 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
179 * 270000 * 1 * 8 / 10 == 216000
181 * The actual data capacity of that configuration is 2.16Gbit/s, so the
182 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
183 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184 * 119000. At 18bpp that's 2142000 kilobits per second.
186 * Thus the strange-looking division by 10 in intel_dp_link_required, to
187 * get the result in decakilobits instead of kilobits.
/*
 * Bandwidth needed by a mode, in decakilobits/s: ceil(pixel_clock * bpp / 10).
 * See the units discussion above — the /10 converts kilobits to decakilobits.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* Round up so we never under-provision the link. */
	return (kilobits + 9) / 10;
}
/*
 * Usable payload bandwidth of a link, in decakilobits/s.
 * 8b/10b encoding means only 8 of every 10 link bits carry data,
 * hence the * 8 / 10 (e.g. 270000 kHz * 1 lane -> 216000).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_symbol_rate = max_link_clock * max_lanes;

	return link_symbol_rate * 8 / 10;
}
202 static enum drm_mode_status
203 intel_dp_mode_valid(struct drm_connector
*connector
,
204 struct drm_display_mode
*mode
)
206 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
207 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
208 struct drm_display_mode
*fixed_mode
= intel_connector
->panel
.fixed_mode
;
209 int target_clock
= mode
->clock
;
210 int max_rate
, mode_rate
, max_lanes
, max_link_clock
;
212 if (is_edp(intel_dp
) && fixed_mode
) {
213 if (mode
->hdisplay
> fixed_mode
->hdisplay
)
216 if (mode
->vdisplay
> fixed_mode
->vdisplay
)
219 target_clock
= fixed_mode
->clock
;
222 max_link_clock
= intel_dp_max_link_rate(intel_dp
);
223 max_lanes
= intel_dp_max_lane_count(intel_dp
);
225 max_rate
= intel_dp_max_data_rate(max_link_clock
, max_lanes
);
226 mode_rate
= intel_dp_link_required(target_clock
, 18);
228 if (mode_rate
> max_rate
)
229 return MODE_CLOCK_HIGH
;
231 if (mode
->clock
< 10000)
232 return MODE_CLOCK_LOW
;
234 if (mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
235 return MODE_H_ILLEGAL
;
/*
 * Pack up to 4 bytes of an AUX message into one 32-bit data-register
 * value, first byte in the most-significant position. Extra input bytes
 * beyond 4 are ignored (one register holds at most 4 bytes).
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * Unpack a 32-bit AUX data-register value into up to 4 bytes,
 * most-significant byte first (inverse of intel_dp_pack_aux()).
 * dst_bytes beyond 4 is clamped — one register holds at most 4 bytes.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
262 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
263 struct intel_dp
*intel_dp
);
265 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
266 struct intel_dp
*intel_dp
);
268 static void pps_lock(struct intel_dp
*intel_dp
)
270 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
271 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
272 struct drm_device
*dev
= encoder
->base
.dev
;
273 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
274 enum intel_display_power_domain power_domain
;
277 * See vlv_power_sequencer_reset() why we need
278 * a power domain reference here.
280 power_domain
= intel_display_port_aux_power_domain(encoder
);
281 intel_display_power_get(dev_priv
, power_domain
);
283 mutex_lock(&dev_priv
->pps_mutex
);
286 static void pps_unlock(struct intel_dp
*intel_dp
)
288 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
289 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
290 struct drm_device
*dev
= encoder
->base
.dev
;
291 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
292 enum intel_display_power_domain power_domain
;
294 mutex_unlock(&dev_priv
->pps_mutex
);
296 power_domain
= intel_display_port_aux_power_domain(encoder
);
297 intel_display_power_put(dev_priv
, power_domain
);
301 vlv_power_sequencer_kick(struct intel_dp
*intel_dp
)
303 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
304 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
305 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
306 enum pipe pipe
= intel_dp
->pps_pipe
;
307 bool pll_enabled
, release_cl_override
= false;
308 enum dpio_phy phy
= DPIO_PHY(pipe
);
309 enum dpio_channel ch
= vlv_pipe_to_channel(pipe
);
312 if (WARN(I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314 pipe_name(pipe
), port_name(intel_dig_port
->port
)))
317 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318 pipe_name(pipe
), port_name(intel_dig_port
->port
));
320 /* Preserve the BIOS-computed detected bit. This is
321 * supposed to be read-only.
323 DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
324 DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
325 DP
|= DP_PORT_WIDTH(1);
326 DP
|= DP_LINK_TRAIN_PAT_1
;
328 if (IS_CHERRYVIEW(dev
))
329 DP
|= DP_PIPE_SELECT_CHV(pipe
);
330 else if (pipe
== PIPE_B
)
331 DP
|= DP_PIPEB_SELECT
;
333 pll_enabled
= I915_READ(DPLL(pipe
)) & DPLL_VCO_ENABLE
;
336 * The DPLL for the pipe must be enabled for this to work.
337 * So enable temporarily it if it's not already enabled.
340 release_cl_override
= IS_CHERRYVIEW(dev
) &&
341 !chv_phy_powergate_ch(dev_priv
, phy
, ch
, true);
343 vlv_force_pll_on(dev
, pipe
, IS_CHERRYVIEW(dev
) ?
344 &chv_dpll
[0].dpll
: &vlv_dpll
[0].dpll
);
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power seqeuencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
353 I915_WRITE(intel_dp
->output_reg
, DP
);
354 POSTING_READ(intel_dp
->output_reg
);
356 I915_WRITE(intel_dp
->output_reg
, DP
| DP_PORT_EN
);
357 POSTING_READ(intel_dp
->output_reg
);
359 I915_WRITE(intel_dp
->output_reg
, DP
& ~DP_PORT_EN
);
360 POSTING_READ(intel_dp
->output_reg
);
363 vlv_force_pll_off(dev
, pipe
);
365 if (release_cl_override
)
366 chv_phy_powergate_ch(dev_priv
, phy
, ch
, false);
371 vlv_power_sequencer_pipe(struct intel_dp
*intel_dp
)
373 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
374 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
375 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
376 struct intel_encoder
*encoder
;
377 unsigned int pipes
= (1 << PIPE_A
) | (1 << PIPE_B
);
380 lockdep_assert_held(&dev_priv
->pps_mutex
);
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp
));
385 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
386 return intel_dp
->pps_pipe
;
389 * We don't have power sequencer currently.
390 * Pick one that's not used by other ports.
392 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
394 struct intel_dp
*tmp
;
396 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
399 tmp
= enc_to_intel_dp(&encoder
->base
);
401 if (tmp
->pps_pipe
!= INVALID_PIPE
)
402 pipes
&= ~(1 << tmp
->pps_pipe
);
406 * Didn't find one. This should not happen since there
407 * are two power sequencers and up to two eDP ports.
409 if (WARN_ON(pipes
== 0))
412 pipe
= ffs(pipes
) - 1;
414 vlv_steal_power_sequencer(dev
, pipe
);
415 intel_dp
->pps_pipe
= pipe
;
417 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
418 pipe_name(intel_dp
->pps_pipe
),
419 port_name(intel_dig_port
->port
));
421 /* init power sequencer on this pipe and port */
422 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
423 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
426 * Even vdd force doesn't work until we've made
427 * the power sequencer lock in on the port.
429 vlv_power_sequencer_kick(intel_dp
);
431 return intel_dp
->pps_pipe
;
434 typedef bool (*vlv_pipe_check
)(struct drm_i915_private
*dev_priv
,
437 static bool vlv_pipe_has_pp_on(struct drm_i915_private
*dev_priv
,
440 return I915_READ(VLV_PIPE_PP_STATUS(pipe
)) & PP_ON
;
443 static bool vlv_pipe_has_vdd_on(struct drm_i915_private
*dev_priv
,
446 return I915_READ(VLV_PIPE_PP_CONTROL(pipe
)) & EDP_FORCE_VDD
;
449 static bool vlv_pipe_any(struct drm_i915_private
*dev_priv
,
456 vlv_initial_pps_pipe(struct drm_i915_private
*dev_priv
,
458 vlv_pipe_check pipe_check
)
462 for (pipe
= PIPE_A
; pipe
<= PIPE_B
; pipe
++) {
463 u32 port_sel
= I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe
)) &
464 PANEL_PORT_SELECT_MASK
;
466 if (port_sel
!= PANEL_PORT_SELECT_VLV(port
))
469 if (!pipe_check(dev_priv
, pipe
))
479 vlv_initial_power_sequencer_setup(struct intel_dp
*intel_dp
)
481 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
482 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
483 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
484 enum port port
= intel_dig_port
->port
;
486 lockdep_assert_held(&dev_priv
->pps_mutex
);
488 /* try to find a pipe with this port selected */
489 /* first pick one where the panel is on */
490 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
492 /* didn't find one? pick one where vdd is on */
493 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
494 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
495 vlv_pipe_has_vdd_on
);
496 /* didn't find one? pick one with just the correct port */
497 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
498 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
501 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
502 if (intel_dp
->pps_pipe
== INVALID_PIPE
) {
503 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
508 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
509 port_name(port
), pipe_name(intel_dp
->pps_pipe
));
511 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
512 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
515 void vlv_power_sequencer_reset(struct drm_i915_private
*dev_priv
)
517 struct drm_device
*dev
= dev_priv
->dev
;
518 struct intel_encoder
*encoder
;
520 if (WARN_ON(!IS_VALLEYVIEW(dev
)))
524 * We can't grab pps_mutex here due to deadlock with power_domain
525 * mutex when power_domain functions are called while holding pps_mutex.
526 * That also means that in order to use pps_pipe the code needs to
527 * hold both a power domain reference and pps_mutex, and the power domain
528 * reference get/put must be done while _not_ holding pps_mutex.
529 * pps_{lock,unlock}() do these steps in the correct order, so one
530 * should use them always.
533 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, base
.head
) {
534 struct intel_dp
*intel_dp
;
536 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
539 intel_dp
= enc_to_intel_dp(&encoder
->base
);
540 intel_dp
->pps_pipe
= INVALID_PIPE
;
545 _pp_ctrl_reg(struct intel_dp
*intel_dp
)
547 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
550 return BXT_PP_CONTROL(0);
551 else if (HAS_PCH_SPLIT(dev
))
552 return PCH_PP_CONTROL
;
554 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp
));
558 _pp_stat_reg(struct intel_dp
*intel_dp
)
560 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
563 return BXT_PP_STATUS(0);
564 else if (HAS_PCH_SPLIT(dev
))
565 return PCH_PP_STATUS
;
567 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp
));
570 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
571 This function only applicable when panel PM state is not to be tracked */
572 static int edp_notify_handler(struct notifier_block
*this, unsigned long code
,
575 struct intel_dp
*intel_dp
= container_of(this, typeof(* intel_dp
),
577 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
578 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
580 if (!is_edp(intel_dp
) || code
!= SYS_RESTART
)
585 if (IS_VALLEYVIEW(dev
)) {
586 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
587 i915_reg_t pp_ctrl_reg
, pp_div_reg
;
590 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
591 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
592 pp_div
= I915_READ(pp_div_reg
);
593 pp_div
&= PP_REFERENCE_DIVIDER_MASK
;
595 /* 0x1F write to PP_DIV_REG sets max cycle delay */
596 I915_WRITE(pp_div_reg
, pp_div
| 0x1F);
597 I915_WRITE(pp_ctrl_reg
, PANEL_UNLOCK_REGS
| PANEL_POWER_OFF
);
598 msleep(intel_dp
->panel_power_cycle_delay
);
601 pps_unlock(intel_dp
);
606 static bool edp_have_panel_power(struct intel_dp
*intel_dp
)
608 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
609 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
611 lockdep_assert_held(&dev_priv
->pps_mutex
);
613 if (IS_VALLEYVIEW(dev
) &&
614 intel_dp
->pps_pipe
== INVALID_PIPE
)
617 return (I915_READ(_pp_stat_reg(intel_dp
)) & PP_ON
) != 0;
620 static bool edp_have_panel_vdd(struct intel_dp
*intel_dp
)
622 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
623 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
625 lockdep_assert_held(&dev_priv
->pps_mutex
);
627 if (IS_VALLEYVIEW(dev
) &&
628 intel_dp
->pps_pipe
== INVALID_PIPE
)
631 return I915_READ(_pp_ctrl_reg(intel_dp
)) & EDP_FORCE_VDD
;
635 intel_dp_check_edp(struct intel_dp
*intel_dp
)
637 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
638 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
640 if (!is_edp(intel_dp
))
643 if (!edp_have_panel_power(intel_dp
) && !edp_have_panel_vdd(intel_dp
)) {
644 WARN(1, "eDP powered off while attempting aux channel communication.\n");
645 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
646 I915_READ(_pp_stat_reg(intel_dp
)),
647 I915_READ(_pp_ctrl_reg(intel_dp
)));
652 intel_dp_aux_wait_done(struct intel_dp
*intel_dp
, bool has_aux_irq
)
654 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
655 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
656 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
657 i915_reg_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
661 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
663 done
= wait_event_timeout(dev_priv
->gmbus_wait_queue
, C
,
664 msecs_to_jiffies_timeout(10));
666 done
= wait_for_atomic(C
, 10) == 0;
668 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
675 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
677 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
678 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
681 * The clock divider is based off the hrawclk, and would like to run at
682 * 2MHz. So, take the hrawclk value and divide by 2 and use that
684 return index
? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev
), 2);
687 static uint32_t ilk_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
689 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
690 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
691 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
696 if (intel_dig_port
->port
== PORT_A
) {
697 return DIV_ROUND_CLOSEST(dev_priv
->cdclk_freq
, 2000);
700 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev
), 2);
704 static uint32_t hsw_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
706 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
707 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
708 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
710 if (intel_dig_port
->port
== PORT_A
) {
713 return DIV_ROUND_CLOSEST(dev_priv
->cdclk_freq
, 2000);
714 } else if (HAS_PCH_LPT_H(dev_priv
)) {
715 /* Workaround for non-ULT HSW */
722 return index
? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev
), 2);
726 static uint32_t vlv_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
728 return index
? 0 : 100;
731 static uint32_t skl_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
734 * SKL doesn't need us to program the AUX clock divider (Hardware will
735 * derive the clock from CDCLK automatically). We still implement the
736 * get_aux_clock_divider vfunc to plug-in into the existing code.
738 return index
? 0 : 1;
741 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp
*intel_dp
,
744 uint32_t aux_clock_divider
)
746 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
747 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
748 uint32_t precharge
, timeout
;
755 if (IS_BROADWELL(dev
) && intel_dig_port
->port
== PORT_A
)
756 timeout
= DP_AUX_CH_CTL_TIME_OUT_600us
;
758 timeout
= DP_AUX_CH_CTL_TIME_OUT_400us
;
760 return DP_AUX_CH_CTL_SEND_BUSY
|
762 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
763 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
765 DP_AUX_CH_CTL_RECEIVE_ERROR
|
766 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
767 (precharge
<< DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT
) |
768 (aux_clock_divider
<< DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT
);
771 static uint32_t skl_get_aux_send_ctl(struct intel_dp
*intel_dp
,
776 return DP_AUX_CH_CTL_SEND_BUSY
|
778 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
779 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
780 DP_AUX_CH_CTL_TIME_OUT_1600us
|
781 DP_AUX_CH_CTL_RECEIVE_ERROR
|
782 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
783 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
787 intel_dp_aux_ch(struct intel_dp
*intel_dp
,
788 const uint8_t *send
, int send_bytes
,
789 uint8_t *recv
, int recv_size
)
791 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
792 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
793 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
794 i915_reg_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
795 uint32_t aux_clock_divider
;
796 int i
, ret
, recv_bytes
;
799 bool has_aux_irq
= HAS_AUX_IRQ(dev
);
805 * We will be called with VDD already enabled for dpcd/edid/oui reads.
806 * In such cases we want to leave VDD enabled and it's up to upper layers
807 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
810 vdd
= edp_panel_vdd_on(intel_dp
);
812 /* dp aux is extremely sensitive to irq latency, hence request the
813 * lowest possible wakeup latency and so prevent the cpu from going into
816 pm_qos_update_request(&dev_priv
->pm_qos
, 0);
818 intel_dp_check_edp(intel_dp
);
820 /* Try to wait for any previous AUX channel activity */
821 for (try = 0; try < 3; try++) {
822 status
= I915_READ_NOTRACE(ch_ctl
);
823 if ((status
& DP_AUX_CH_CTL_SEND_BUSY
) == 0)
829 static u32 last_status
= -1;
830 const u32 status
= I915_READ(ch_ctl
);
832 if (status
!= last_status
) {
833 WARN(1, "dp_aux_ch not started status 0x%08x\n",
835 last_status
= status
;
842 /* Only 5 data registers! */
843 if (WARN_ON(send_bytes
> 20 || recv_size
> 20)) {
848 while ((aux_clock_divider
= intel_dp
->get_aux_clock_divider(intel_dp
, clock
++))) {
849 u32 send_ctl
= intel_dp
->get_aux_send_ctl(intel_dp
,
854 /* Must try at least 3 times according to DP spec */
855 for (try = 0; try < 5; try++) {
856 /* Load the send data into the aux channel data registers */
857 for (i
= 0; i
< send_bytes
; i
+= 4)
858 I915_WRITE(intel_dp
->aux_ch_data_reg
[i
>> 2],
859 intel_dp_pack_aux(send
+ i
,
862 /* Send the command and wait for it to complete */
863 I915_WRITE(ch_ctl
, send_ctl
);
865 status
= intel_dp_aux_wait_done(intel_dp
, has_aux_irq
);
867 /* Clear done status and any errors */
871 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
872 DP_AUX_CH_CTL_RECEIVE_ERROR
);
874 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
)
877 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
878 * 400us delay required for errors and timeouts
879 * Timeout errors from the HW already meet this
880 * requirement so skip to next iteration
882 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
883 usleep_range(400, 500);
886 if (status
& DP_AUX_CH_CTL_DONE
)
891 if ((status
& DP_AUX_CH_CTL_DONE
) == 0) {
892 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status
);
898 /* Check for timeout or receive error.
899 * Timeouts occur when the sink is not connected
901 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
902 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status
);
907 /* Timeouts occur when the device isn't connected, so they're
908 * "normal" -- don't fill the kernel log with these */
909 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
) {
910 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status
);
915 /* Unload any bytes sent back from the other side */
916 recv_bytes
= ((status
& DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
) >>
917 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
);
918 if (recv_bytes
> recv_size
)
919 recv_bytes
= recv_size
;
921 for (i
= 0; i
< recv_bytes
; i
+= 4)
922 intel_dp_unpack_aux(I915_READ(intel_dp
->aux_ch_data_reg
[i
>> 2]),
923 recv
+ i
, recv_bytes
- i
);
927 pm_qos_update_request(&dev_priv
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
930 edp_panel_vdd_off(intel_dp
, false);
932 pps_unlock(intel_dp
);
937 #define BARE_ADDRESS_SIZE 3
938 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
940 intel_dp_aux_transfer(struct drm_dp_aux
*aux
, struct drm_dp_aux_msg
*msg
)
942 struct intel_dp
*intel_dp
= container_of(aux
, struct intel_dp
, aux
);
943 uint8_t txbuf
[20], rxbuf
[20];
944 size_t txsize
, rxsize
;
947 txbuf
[0] = (msg
->request
<< 4) |
948 ((msg
->address
>> 16) & 0xf);
949 txbuf
[1] = (msg
->address
>> 8) & 0xff;
950 txbuf
[2] = msg
->address
& 0xff;
951 txbuf
[3] = msg
->size
- 1;
953 switch (msg
->request
& ~DP_AUX_I2C_MOT
) {
954 case DP_AUX_NATIVE_WRITE
:
955 case DP_AUX_I2C_WRITE
:
956 case DP_AUX_I2C_WRITE_STATUS_UPDATE
:
957 txsize
= msg
->size
? HEADER_SIZE
+ msg
->size
: BARE_ADDRESS_SIZE
;
958 rxsize
= 2; /* 0 or 1 data bytes */
960 if (WARN_ON(txsize
> 20))
963 memcpy(txbuf
+ HEADER_SIZE
, msg
->buffer
, msg
->size
);
965 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
967 msg
->reply
= rxbuf
[0] >> 4;
970 /* Number of bytes written in a short write. */
971 ret
= clamp_t(int, rxbuf
[1], 0, msg
->size
);
973 /* Return payload size. */
979 case DP_AUX_NATIVE_READ
:
980 case DP_AUX_I2C_READ
:
981 txsize
= msg
->size
? HEADER_SIZE
: BARE_ADDRESS_SIZE
;
982 rxsize
= msg
->size
+ 1;
984 if (WARN_ON(rxsize
> 20))
987 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
989 msg
->reply
= rxbuf
[0] >> 4;
991 * Assume happy day, and copy the data. The caller is
992 * expected to check msg->reply before touching it.
994 * Return payload size.
997 memcpy(msg
->buffer
, rxbuf
+ 1, ret
);
1009 static i915_reg_t
g4x_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1016 return DP_AUX_CH_CTL(port
);
1019 return DP_AUX_CH_CTL(PORT_B
);
1023 static i915_reg_t
g4x_aux_data_reg(struct drm_i915_private
*dev_priv
,
1024 enum port port
, int index
)
1030 return DP_AUX_CH_DATA(port
, index
);
1033 return DP_AUX_CH_DATA(PORT_B
, index
);
1037 static i915_reg_t
ilk_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1042 return DP_AUX_CH_CTL(port
);
1046 return PCH_DP_AUX_CH_CTL(port
);
1049 return DP_AUX_CH_CTL(PORT_A
);
1053 static i915_reg_t
ilk_aux_data_reg(struct drm_i915_private
*dev_priv
,
1054 enum port port
, int index
)
1058 return DP_AUX_CH_DATA(port
, index
);
1062 return PCH_DP_AUX_CH_DATA(port
, index
);
1065 return DP_AUX_CH_DATA(PORT_A
, index
);
1070 * On SKL we don't have Aux for port E so we rely
1071 * on VBT to set a proper alternate aux channel.
1073 static enum port
skl_porte_aux_port(struct drm_i915_private
*dev_priv
)
1075 const struct ddi_vbt_port_info
*info
=
1076 &dev_priv
->vbt
.ddi_port_info
[PORT_E
];
1078 switch (info
->alternate_aux_channel
) {
1088 MISSING_CASE(info
->alternate_aux_channel
);
1093 static i915_reg_t
skl_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1097 port
= skl_porte_aux_port(dev_priv
);
1104 return DP_AUX_CH_CTL(port
);
1107 return DP_AUX_CH_CTL(PORT_A
);
1111 static i915_reg_t
skl_aux_data_reg(struct drm_i915_private
*dev_priv
,
1112 enum port port
, int index
)
1115 port
= skl_porte_aux_port(dev_priv
);
1122 return DP_AUX_CH_DATA(port
, index
);
1125 return DP_AUX_CH_DATA(PORT_A
, index
);
1129 static i915_reg_t
intel_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1132 if (INTEL_INFO(dev_priv
)->gen
>= 9)
1133 return skl_aux_ctl_reg(dev_priv
, port
);
1134 else if (HAS_PCH_SPLIT(dev_priv
))
1135 return ilk_aux_ctl_reg(dev_priv
, port
);
1137 return g4x_aux_ctl_reg(dev_priv
, port
);
1140 static i915_reg_t
intel_aux_data_reg(struct drm_i915_private
*dev_priv
,
1141 enum port port
, int index
)
1143 if (INTEL_INFO(dev_priv
)->gen
>= 9)
1144 return skl_aux_data_reg(dev_priv
, port
, index
);
1145 else if (HAS_PCH_SPLIT(dev_priv
))
1146 return ilk_aux_data_reg(dev_priv
, port
, index
);
1148 return g4x_aux_data_reg(dev_priv
, port
, index
);
1151 static void intel_aux_reg_init(struct intel_dp
*intel_dp
)
1153 struct drm_i915_private
*dev_priv
= to_i915(intel_dp_to_dev(intel_dp
));
1154 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1157 intel_dp
->aux_ch_ctl_reg
= intel_aux_ctl_reg(dev_priv
, port
);
1158 for (i
= 0; i
< ARRAY_SIZE(intel_dp
->aux_ch_data_reg
); i
++)
1159 intel_dp
->aux_ch_data_reg
[i
] = intel_aux_data_reg(dev_priv
, port
, i
);
1163 intel_dp_aux_fini(struct intel_dp
*intel_dp
)
1165 drm_dp_aux_unregister(&intel_dp
->aux
);
1166 kfree(intel_dp
->aux
.name
);
1170 intel_dp_aux_init(struct intel_dp
*intel_dp
, struct intel_connector
*connector
)
1172 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1173 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1174 enum port port
= intel_dig_port
->port
;
1177 intel_aux_reg_init(intel_dp
);
1179 intel_dp
->aux
.name
= kasprintf(GFP_KERNEL
, "DPDDC-%c", port_name(port
));
1180 if (!intel_dp
->aux
.name
)
1183 intel_dp
->aux
.dev
= dev
->dev
;
1184 intel_dp
->aux
.transfer
= intel_dp_aux_transfer
;
1186 DRM_DEBUG_KMS("registering %s bus for %s\n",
1188 connector
->base
.kdev
->kobj
.name
);
1190 ret
= drm_dp_aux_register(&intel_dp
->aux
);
1192 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1193 intel_dp
->aux
.name
, ret
);
1194 kfree(intel_dp
->aux
.name
);
1198 ret
= sysfs_create_link(&connector
->base
.kdev
->kobj
,
1199 &intel_dp
->aux
.ddc
.dev
.kobj
,
1200 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1202 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1203 intel_dp
->aux
.name
, ret
);
1204 intel_dp_aux_fini(intel_dp
);
1212 intel_dp_connector_unregister(struct intel_connector
*intel_connector
)
1214 struct intel_dp
*intel_dp
= intel_attached_dp(&intel_connector
->base
);
1216 if (!intel_connector
->mst_port
)
1217 sysfs_remove_link(&intel_connector
->base
.kdev
->kobj
,
1218 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1219 intel_connector_unregister(intel_connector
);
1223 skl_edp_set_pll_config(struct intel_crtc_state
*pipe_config
)
1227 memset(&pipe_config
->dpll_hw_state
, 0,
1228 sizeof(pipe_config
->dpll_hw_state
));
1230 pipe_config
->ddi_pll_sel
= SKL_DPLL0
;
1231 pipe_config
->dpll_hw_state
.cfgcr1
= 0;
1232 pipe_config
->dpll_hw_state
.cfgcr2
= 0;
1234 ctrl1
= DPLL_CTRL1_OVERRIDE(SKL_DPLL0
);
1235 switch (pipe_config
->port_clock
/ 2) {
1237 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810
,
1241 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350
,
1245 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700
,
1249 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620
,
1252 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1253 results in CDCLK change. Need to handle the change of CDCLK by
1254 disabling pipes and re-enabling them */
1256 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080
,
1260 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160
,
1265 pipe_config
->dpll_hw_state
.ctrl1
= ctrl1
;
1269 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state
*pipe_config
)
1271 memset(&pipe_config
->dpll_hw_state
, 0,
1272 sizeof(pipe_config
->dpll_hw_state
));
1274 switch (pipe_config
->port_clock
/ 2) {
1276 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_810
;
1279 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_1350
;
1282 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_2700
;
1288 intel_dp_sink_rates(struct intel_dp
*intel_dp
, const int **sink_rates
)
1290 if (intel_dp
->num_sink_rates
) {
1291 *sink_rates
= intel_dp
->sink_rates
;
1292 return intel_dp
->num_sink_rates
;
1295 *sink_rates
= default_rates
;
1297 return (intel_dp_max_link_bw(intel_dp
) >> 3) + 1;
1300 bool intel_dp_source_supports_hbr2(struct intel_dp
*intel_dp
)
1302 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1303 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
1305 /* WaDisableHBR2:skl */
1306 if (IS_SKL_REVID(dev
, 0, SKL_REVID_B0
))
1309 if ((IS_HASWELL(dev
) && !IS_HSW_ULX(dev
)) || IS_BROADWELL(dev
) ||
1310 (INTEL_INFO(dev
)->gen
>= 9))
1317 intel_dp_source_rates(struct intel_dp
*intel_dp
, const int **source_rates
)
1319 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1320 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
1323 if (IS_BROXTON(dev
)) {
1324 *source_rates
= bxt_rates
;
1325 size
= ARRAY_SIZE(bxt_rates
);
1326 } else if (IS_SKYLAKE(dev
) || IS_KABYLAKE(dev
)) {
1327 *source_rates
= skl_rates
;
1328 size
= ARRAY_SIZE(skl_rates
);
1330 *source_rates
= default_rates
;
1331 size
= ARRAY_SIZE(default_rates
);
1334 /* This depends on the fact that 5.4 is last value in the array */
1335 if (!intel_dp_source_supports_hbr2(intel_dp
))
1342 intel_dp_set_clock(struct intel_encoder
*encoder
,
1343 struct intel_crtc_state
*pipe_config
)
1345 struct drm_device
*dev
= encoder
->base
.dev
;
1346 const struct dp_link_dpll
*divisor
= NULL
;
1350 divisor
= gen4_dpll
;
1351 count
= ARRAY_SIZE(gen4_dpll
);
1352 } else if (HAS_PCH_SPLIT(dev
)) {
1354 count
= ARRAY_SIZE(pch_dpll
);
1355 } else if (IS_CHERRYVIEW(dev
)) {
1357 count
= ARRAY_SIZE(chv_dpll
);
1358 } else if (IS_VALLEYVIEW(dev
)) {
1360 count
= ARRAY_SIZE(vlv_dpll
);
1363 if (divisor
&& count
) {
1364 for (i
= 0; i
< count
; i
++) {
1365 if (pipe_config
->port_clock
== divisor
[i
].clock
) {
1366 pipe_config
->dpll
= divisor
[i
].dpll
;
1367 pipe_config
->clock_set
= true;
1374 static int intersect_rates(const int *source_rates
, int source_len
,
1375 const int *sink_rates
, int sink_len
,
1378 int i
= 0, j
= 0, k
= 0;
1380 while (i
< source_len
&& j
< sink_len
) {
1381 if (source_rates
[i
] == sink_rates
[j
]) {
1382 if (WARN_ON(k
>= DP_MAX_SUPPORTED_RATES
))
1384 common_rates
[k
] = source_rates
[i
];
1388 } else if (source_rates
[i
] < sink_rates
[j
]) {
/* Compute the set of link rates both source and sink support. */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
/* Render an int array as "a, b, c" into str (always NUL-terminated);
 * stops silently when the buffer fills up. */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
1427 static void intel_dp_print_rates(struct intel_dp
*intel_dp
)
1429 const int *source_rates
, *sink_rates
;
1430 int source_len
, sink_len
, common_len
;
1431 int common_rates
[DP_MAX_SUPPORTED_RATES
];
1432 char str
[128]; /* FIXME: too big for stack? */
1434 if ((drm_debug
& DRM_UT_KMS
) == 0)
1437 source_len
= intel_dp_source_rates(intel_dp
, &source_rates
);
1438 snprintf_int_array(str
, sizeof(str
), source_rates
, source_len
);
1439 DRM_DEBUG_KMS("source rates: %s\n", str
);
1441 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1442 snprintf_int_array(str
, sizeof(str
), sink_rates
, sink_len
);
1443 DRM_DEBUG_KMS("sink rates: %s\n", str
);
1445 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1446 snprintf_int_array(str
, sizeof(str
), common_rates
, common_len
);
1447 DRM_DEBUG_KMS("common rates: %s\n", str
);
1450 static int rate_to_index(int find
, const int *rates
)
1454 for (i
= 0; i
< DP_MAX_SUPPORTED_RATES
; ++i
)
1455 if (find
== rates
[i
])
1462 intel_dp_max_link_rate(struct intel_dp
*intel_dp
)
1464 int rates
[DP_MAX_SUPPORTED_RATES
] = {};
1467 len
= intel_dp_common_rates(intel_dp
, rates
);
1468 if (WARN_ON(len
<= 0))
1471 return rates
[rate_to_index(0, rates
) - 1];
1474 int intel_dp_rate_select(struct intel_dp
*intel_dp
, int rate
)
1476 return rate_to_index(rate
, intel_dp
->sink_rates
);
1479 void intel_dp_compute_rate(struct intel_dp
*intel_dp
, int port_clock
,
1480 uint8_t *link_bw
, uint8_t *rate_select
)
1482 if (intel_dp
->num_sink_rates
) {
1485 intel_dp_rate_select(intel_dp
, port_clock
);
1487 *link_bw
= drm_dp_link_rate_to_bw_code(port_clock
);
1493 intel_dp_compute_config(struct intel_encoder
*encoder
,
1494 struct intel_crtc_state
*pipe_config
)
1496 struct drm_device
*dev
= encoder
->base
.dev
;
1497 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1498 struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
1499 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1500 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1501 struct intel_crtc
*intel_crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
1502 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
1503 int lane_count
, clock
;
1504 int min_lane_count
= 1;
1505 int max_lane_count
= intel_dp_max_lane_count(intel_dp
);
1506 /* Conveniently, the link BW constants become indices with a shift...*/
1510 int link_avail
, link_clock
;
1511 int common_rates
[DP_MAX_SUPPORTED_RATES
] = {};
1513 uint8_t link_bw
, rate_select
;
1515 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1517 /* No common link rates between source and sink */
1518 WARN_ON(common_len
<= 0);
1520 max_clock
= common_len
- 1;
1522 if (HAS_PCH_SPLIT(dev
) && !HAS_DDI(dev
) && port
!= PORT_A
)
1523 pipe_config
->has_pch_encoder
= true;
1525 pipe_config
->has_dp_encoder
= true;
1526 pipe_config
->has_drrs
= false;
1527 pipe_config
->has_audio
= intel_dp
->has_audio
&& port
!= PORT_A
;
1529 if (is_edp(intel_dp
) && intel_connector
->panel
.fixed_mode
) {
1530 intel_fixed_panel_mode(intel_connector
->panel
.fixed_mode
,
1533 if (INTEL_INFO(dev
)->gen
>= 9) {
1535 ret
= skl_update_scaler_crtc(pipe_config
);
1540 if (HAS_GMCH_DISPLAY(dev
))
1541 intel_gmch_panel_fitting(intel_crtc
, pipe_config
,
1542 intel_connector
->panel
.fitting_mode
);
1544 intel_pch_panel_fitting(intel_crtc
, pipe_config
,
1545 intel_connector
->panel
.fitting_mode
);
1548 if (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
1551 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1552 "max bw %d pixel clock %iKHz\n",
1553 max_lane_count
, common_rates
[max_clock
],
1554 adjusted_mode
->crtc_clock
);
1556 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1557 * bpc in between. */
1558 bpp
= pipe_config
->pipe_bpp
;
1559 if (is_edp(intel_dp
)) {
1561 /* Get bpp from vbt only for panels that dont have bpp in edid */
1562 if (intel_connector
->base
.display_info
.bpc
== 0 &&
1563 (dev_priv
->vbt
.edp_bpp
&& dev_priv
->vbt
.edp_bpp
< bpp
)) {
1564 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1565 dev_priv
->vbt
.edp_bpp
);
1566 bpp
= dev_priv
->vbt
.edp_bpp
;
1570 * Use the maximum clock and number of lanes the eDP panel
1571 * advertizes being capable of. The panels are generally
1572 * designed to support only a single clock and lane
1573 * configuration, and typically these values correspond to the
1574 * native resolution of the panel.
1576 min_lane_count
= max_lane_count
;
1577 min_clock
= max_clock
;
1580 for (; bpp
>= 6*3; bpp
-= 2*3) {
1581 mode_rate
= intel_dp_link_required(adjusted_mode
->crtc_clock
,
1584 for (clock
= min_clock
; clock
<= max_clock
; clock
++) {
1585 for (lane_count
= min_lane_count
;
1586 lane_count
<= max_lane_count
;
1589 link_clock
= common_rates
[clock
];
1590 link_avail
= intel_dp_max_data_rate(link_clock
,
1593 if (mode_rate
<= link_avail
) {
1603 if (intel_dp
->color_range_auto
) {
1606 * CEA-861-E - 5.1 Default Encoding Parameters
1607 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1609 pipe_config
->limited_color_range
=
1610 bpp
!= 18 && drm_match_cea_mode(adjusted_mode
) > 1;
1612 pipe_config
->limited_color_range
=
1613 intel_dp
->limited_color_range
;
1616 pipe_config
->lane_count
= lane_count
;
1618 pipe_config
->pipe_bpp
= bpp
;
1619 pipe_config
->port_clock
= common_rates
[clock
];
1621 intel_dp_compute_rate(intel_dp
, pipe_config
->port_clock
,
1622 &link_bw
, &rate_select
);
1624 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1625 link_bw
, rate_select
, pipe_config
->lane_count
,
1626 pipe_config
->port_clock
, bpp
);
1627 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1628 mode_rate
, link_avail
);
1630 intel_link_compute_m_n(bpp
, lane_count
,
1631 adjusted_mode
->crtc_clock
,
1632 pipe_config
->port_clock
,
1633 &pipe_config
->dp_m_n
);
1635 if (intel_connector
->panel
.downclock_mode
!= NULL
&&
1636 dev_priv
->drrs
.type
== SEAMLESS_DRRS_SUPPORT
) {
1637 pipe_config
->has_drrs
= true;
1638 intel_link_compute_m_n(bpp
, lane_count
,
1639 intel_connector
->panel
.downclock_mode
->clock
,
1640 pipe_config
->port_clock
,
1641 &pipe_config
->dp_m2_n2
);
1644 if ((IS_SKYLAKE(dev
) || IS_KABYLAKE(dev
)) && is_edp(intel_dp
))
1645 skl_edp_set_pll_config(pipe_config
);
1646 else if (IS_BROXTON(dev
))
1647 /* handled in ddi */;
1648 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1649 hsw_dp_set_ddi_pll_sel(pipe_config
);
1651 intel_dp_set_clock(encoder
, pipe_config
);
1656 void intel_dp_set_link_params(struct intel_dp
*intel_dp
,
1657 const struct intel_crtc_state
*pipe_config
)
1659 intel_dp
->link_rate
= pipe_config
->port_clock
;
1660 intel_dp
->lane_count
= pipe_config
->lane_count
;
1663 static void intel_dp_prepare(struct intel_encoder
*encoder
)
1665 struct drm_device
*dev
= encoder
->base
.dev
;
1666 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1667 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1668 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1669 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
1670 const struct drm_display_mode
*adjusted_mode
= &crtc
->config
->base
.adjusted_mode
;
1672 intel_dp_set_link_params(intel_dp
, crtc
->config
);
1675 * There are four kinds of DP registers:
1682 * IBX PCH and CPU are the same for almost everything,
1683 * except that the CPU DP PLL is configured in this
1686 * CPT PCH is quite different, having many bits moved
1687 * to the TRANS_DP_CTL register instead. That
1688 * configuration happens (oddly) in ironlake_pch_enable
1691 /* Preserve the BIOS-computed detected bit. This is
1692 * supposed to be read-only.
1694 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
1696 /* Handle DP bits in common between all three register formats */
1697 intel_dp
->DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
1698 intel_dp
->DP
|= DP_PORT_WIDTH(crtc
->config
->lane_count
);
1700 /* Split out the IBX/CPU vs CPT settings */
1702 if (IS_GEN7(dev
) && port
== PORT_A
) {
1703 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1704 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1705 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1706 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1707 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1709 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1710 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1712 intel_dp
->DP
|= crtc
->pipe
<< 29;
1713 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
1716 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1718 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
1719 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1720 trans_dp
|= TRANS_DP_ENH_FRAMING
;
1722 trans_dp
&= ~TRANS_DP_ENH_FRAMING
;
1723 I915_WRITE(TRANS_DP_CTL(crtc
->pipe
), trans_dp
);
1725 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
1726 crtc
->config
->limited_color_range
)
1727 intel_dp
->DP
|= DP_COLOR_RANGE_16_235
;
1729 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1730 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1731 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1732 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1733 intel_dp
->DP
|= DP_LINK_TRAIN_OFF
;
1735 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1736 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1738 if (IS_CHERRYVIEW(dev
))
1739 intel_dp
->DP
|= DP_PIPE_SELECT_CHV(crtc
->pipe
);
1740 else if (crtc
->pipe
== PIPE_B
)
1741 intel_dp
->DP
|= DP_PIPEB_SELECT
;
1745 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1746 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1748 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1749 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1751 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1752 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1754 static void wait_panel_status(struct intel_dp
*intel_dp
,
1758 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1759 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1760 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
1762 lockdep_assert_held(&dev_priv
->pps_mutex
);
1764 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1765 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1767 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1769 I915_READ(pp_stat_reg
),
1770 I915_READ(pp_ctrl_reg
));
1772 if (_wait_for((I915_READ(pp_stat_reg
) & mask
) == value
, 5000, 10)) {
1773 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1774 I915_READ(pp_stat_reg
),
1775 I915_READ(pp_ctrl_reg
));
1778 DRM_DEBUG_KMS("Wait complete\n");
1781 static void wait_panel_on(struct intel_dp
*intel_dp
)
1783 DRM_DEBUG_KMS("Wait for panel power on\n");
1784 wait_panel_status(intel_dp
, IDLE_ON_MASK
, IDLE_ON_VALUE
);
1787 static void wait_panel_off(struct intel_dp
*intel_dp
)
1789 DRM_DEBUG_KMS("Wait for panel power off time\n");
1790 wait_panel_status(intel_dp
, IDLE_OFF_MASK
, IDLE_OFF_VALUE
);
1793 static void wait_panel_power_cycle(struct intel_dp
*intel_dp
)
1795 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1797 /* When we disable the VDD override bit last we have to do the manual
1799 wait_remaining_ms_from_jiffies(intel_dp
->last_power_cycle
,
1800 intel_dp
->panel_power_cycle_delay
);
1802 wait_panel_status(intel_dp
, IDLE_CYCLE_MASK
, IDLE_CYCLE_VALUE
);
1805 static void wait_backlight_on(struct intel_dp
*intel_dp
)
1807 wait_remaining_ms_from_jiffies(intel_dp
->last_power_on
,
1808 intel_dp
->backlight_on_delay
);
1811 static void edp_wait_backlight_off(struct intel_dp
*intel_dp
)
1813 wait_remaining_ms_from_jiffies(intel_dp
->last_backlight_off
,
1814 intel_dp
->backlight_off_delay
);
1817 /* Read the current pp_control value, unlocking the register if it
1821 static u32
ironlake_get_pp_control(struct intel_dp
*intel_dp
)
1823 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1824 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1827 lockdep_assert_held(&dev_priv
->pps_mutex
);
1829 control
= I915_READ(_pp_ctrl_reg(intel_dp
));
1830 if (!IS_BROXTON(dev
)) {
1831 control
&= ~PANEL_UNLOCK_MASK
;
1832 control
|= PANEL_UNLOCK_REGS
;
1838 * Must be paired with edp_panel_vdd_off().
1839 * Must hold pps_mutex around the whole on/off sequence.
1840 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1842 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1844 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1845 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1846 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1847 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1848 enum intel_display_power_domain power_domain
;
1850 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
1851 bool need_to_disable
= !intel_dp
->want_panel_vdd
;
1853 lockdep_assert_held(&dev_priv
->pps_mutex
);
1855 if (!is_edp(intel_dp
))
1858 cancel_delayed_work(&intel_dp
->panel_vdd_work
);
1859 intel_dp
->want_panel_vdd
= true;
1861 if (edp_have_panel_vdd(intel_dp
))
1862 return need_to_disable
;
1864 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
1865 intel_display_power_get(dev_priv
, power_domain
);
1867 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1868 port_name(intel_dig_port
->port
));
1870 if (!edp_have_panel_power(intel_dp
))
1871 wait_panel_power_cycle(intel_dp
);
1873 pp
= ironlake_get_pp_control(intel_dp
);
1874 pp
|= EDP_FORCE_VDD
;
1876 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1877 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1879 I915_WRITE(pp_ctrl_reg
, pp
);
1880 POSTING_READ(pp_ctrl_reg
);
1881 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1882 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1884 * If the panel wasn't on, delay before accessing aux channel
1886 if (!edp_have_panel_power(intel_dp
)) {
1887 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1888 port_name(intel_dig_port
->port
));
1889 msleep(intel_dp
->panel_power_up_delay
);
1892 return need_to_disable
;
1896 * Must be paired with intel_edp_panel_vdd_off() or
1897 * intel_edp_panel_off().
1898 * Nested calls to these functions are not allowed since
1899 * we drop the lock. Caller must use some higher level
1900 * locking to prevent nested calls from other threads.
1902 void intel_edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1906 if (!is_edp(intel_dp
))
1910 vdd
= edp_panel_vdd_on(intel_dp
);
1911 pps_unlock(intel_dp
);
1913 I915_STATE_WARN(!vdd
, "eDP port %c VDD already requested on\n",
1914 port_name(dp_to_dig_port(intel_dp
)->port
));
1917 static void edp_panel_vdd_off_sync(struct intel_dp
*intel_dp
)
1919 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1920 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1921 struct intel_digital_port
*intel_dig_port
=
1922 dp_to_dig_port(intel_dp
);
1923 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1924 enum intel_display_power_domain power_domain
;
1926 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
1928 lockdep_assert_held(&dev_priv
->pps_mutex
);
1930 WARN_ON(intel_dp
->want_panel_vdd
);
1932 if (!edp_have_panel_vdd(intel_dp
))
1935 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1936 port_name(intel_dig_port
->port
));
1938 pp
= ironlake_get_pp_control(intel_dp
);
1939 pp
&= ~EDP_FORCE_VDD
;
1941 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1942 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1944 I915_WRITE(pp_ctrl_reg
, pp
);
1945 POSTING_READ(pp_ctrl_reg
);
1947 /* Make sure sequencer is idle before allowing subsequent activity */
1948 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1949 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1951 if ((pp
& POWER_TARGET_ON
) == 0)
1952 intel_dp
->last_power_cycle
= jiffies
;
1954 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
1955 intel_display_power_put(dev_priv
, power_domain
);
1958 static void edp_panel_vdd_work(struct work_struct
*__work
)
1960 struct intel_dp
*intel_dp
= container_of(to_delayed_work(__work
),
1961 struct intel_dp
, panel_vdd_work
);
1964 if (!intel_dp
->want_panel_vdd
)
1965 edp_panel_vdd_off_sync(intel_dp
);
1966 pps_unlock(intel_dp
);
1969 static void edp_panel_vdd_schedule_off(struct intel_dp
*intel_dp
)
1971 unsigned long delay
;
1974 * Queue the timer to fire a long time from now (relative to the power
1975 * down delay) to keep the panel power up across a sequence of
1978 delay
= msecs_to_jiffies(intel_dp
->panel_power_cycle_delay
* 5);
1979 schedule_delayed_work(&intel_dp
->panel_vdd_work
, delay
);
1983 * Must be paired with edp_panel_vdd_on().
1984 * Must hold pps_mutex around the whole on/off sequence.
1985 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1987 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
)
1989 struct drm_i915_private
*dev_priv
=
1990 intel_dp_to_dev(intel_dp
)->dev_private
;
1992 lockdep_assert_held(&dev_priv
->pps_mutex
);
1994 if (!is_edp(intel_dp
))
1997 I915_STATE_WARN(!intel_dp
->want_panel_vdd
, "eDP port %c VDD not forced on",
1998 port_name(dp_to_dig_port(intel_dp
)->port
));
2000 intel_dp
->want_panel_vdd
= false;
2003 edp_panel_vdd_off_sync(intel_dp
);
2005 edp_panel_vdd_schedule_off(intel_dp
);
2008 static void edp_panel_on(struct intel_dp
*intel_dp
)
2010 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2011 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2013 i915_reg_t pp_ctrl_reg
;
2015 lockdep_assert_held(&dev_priv
->pps_mutex
);
2017 if (!is_edp(intel_dp
))
2020 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2021 port_name(dp_to_dig_port(intel_dp
)->port
));
2023 if (WARN(edp_have_panel_power(intel_dp
),
2024 "eDP port %c panel power already on\n",
2025 port_name(dp_to_dig_port(intel_dp
)->port
)))
2028 wait_panel_power_cycle(intel_dp
);
2030 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2031 pp
= ironlake_get_pp_control(intel_dp
);
2033 /* ILK workaround: disable reset around power sequence */
2034 pp
&= ~PANEL_POWER_RESET
;
2035 I915_WRITE(pp_ctrl_reg
, pp
);
2036 POSTING_READ(pp_ctrl_reg
);
2039 pp
|= POWER_TARGET_ON
;
2041 pp
|= PANEL_POWER_RESET
;
2043 I915_WRITE(pp_ctrl_reg
, pp
);
2044 POSTING_READ(pp_ctrl_reg
);
2046 wait_panel_on(intel_dp
);
2047 intel_dp
->last_power_on
= jiffies
;
2050 pp
|= PANEL_POWER_RESET
; /* restore panel reset bit */
2051 I915_WRITE(pp_ctrl_reg
, pp
);
2052 POSTING_READ(pp_ctrl_reg
);
/* Locked wrapper around edp_panel_on() for external callers. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2067 static void edp_panel_off(struct intel_dp
*intel_dp
)
2069 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2070 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
2071 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2072 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2073 enum intel_display_power_domain power_domain
;
2075 i915_reg_t pp_ctrl_reg
;
2077 lockdep_assert_held(&dev_priv
->pps_mutex
);
2079 if (!is_edp(intel_dp
))
2082 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2083 port_name(dp_to_dig_port(intel_dp
)->port
));
2085 WARN(!intel_dp
->want_panel_vdd
, "Need eDP port %c VDD to turn off panel\n",
2086 port_name(dp_to_dig_port(intel_dp
)->port
));
2088 pp
= ironlake_get_pp_control(intel_dp
);
2089 /* We need to switch off panel power _and_ force vdd, for otherwise some
2090 * panels get very unhappy and cease to work. */
2091 pp
&= ~(POWER_TARGET_ON
| PANEL_POWER_RESET
| EDP_FORCE_VDD
|
2094 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2096 intel_dp
->want_panel_vdd
= false;
2098 I915_WRITE(pp_ctrl_reg
, pp
);
2099 POSTING_READ(pp_ctrl_reg
);
2101 intel_dp
->last_power_cycle
= jiffies
;
2102 wait_panel_off(intel_dp
);
2104 /* We got a reference when we enabled the VDD. */
2105 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
2106 intel_display_power_put(dev_priv
, power_domain
);
/* Locked wrapper around edp_panel_off() for external callers. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2119 /* Enable backlight in the panel power control. */
2120 static void _intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2122 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2123 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2124 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2126 i915_reg_t pp_ctrl_reg
;
2129 * If we enable the backlight right away following a panel power
2130 * on, we may see slight flicker as the panel syncs with the eDP
2131 * link. So delay a bit to make sure the image is solid before
2132 * allowing it to appear.
2134 wait_backlight_on(intel_dp
);
2138 pp
= ironlake_get_pp_control(intel_dp
);
2139 pp
|= EDP_BLC_ENABLE
;
2141 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2143 I915_WRITE(pp_ctrl_reg
, pp
);
2144 POSTING_READ(pp_ctrl_reg
);
2146 pps_unlock(intel_dp
);
2149 /* Enable backlight PWM and backlight PP control. */
2150 void intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2152 if (!is_edp(intel_dp
))
2155 DRM_DEBUG_KMS("\n");
2157 intel_panel_enable_backlight(intel_dp
->attached_connector
);
2158 _intel_edp_backlight_on(intel_dp
);
2161 /* Disable backlight in the panel power control. */
2162 static void _intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2164 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2165 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2167 i915_reg_t pp_ctrl_reg
;
2169 if (!is_edp(intel_dp
))
2174 pp
= ironlake_get_pp_control(intel_dp
);
2175 pp
&= ~EDP_BLC_ENABLE
;
2177 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2179 I915_WRITE(pp_ctrl_reg
, pp
);
2180 POSTING_READ(pp_ctrl_reg
);
2182 pps_unlock(intel_dp
);
2184 intel_dp
->last_backlight_off
= jiffies
;
2185 edp_wait_backlight_off(intel_dp
);
2188 /* Disable backlight PP control and backlight PWM. */
2189 void intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2191 if (!is_edp(intel_dp
))
2194 DRM_DEBUG_KMS("\n");
2196 _intel_edp_backlight_off(intel_dp
);
2197 intel_panel_disable_backlight(intel_dp
->attached_connector
);
2201 * Hook for controlling the panel power control backlight through the bl_power
2202 * sysfs attribute. Take care to handle multiple calls.
2204 static void intel_edp_backlight_power(struct intel_connector
*connector
,
2207 struct intel_dp
*intel_dp
= intel_attached_dp(&connector
->base
);
2211 is_enabled
= ironlake_get_pp_control(intel_dp
) & EDP_BLC_ENABLE
;
2212 pps_unlock(intel_dp
);
2214 if (is_enabled
== enable
)
2217 DRM_DEBUG_KMS("panel power control backlight %s\n",
2218 enable
? "enable" : "disable");
2221 _intel_edp_backlight_on(intel_dp
);
2223 _intel_edp_backlight_off(intel_dp
);
/* Human-readable name for a boolean hardware state. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
2231 static void assert_dp_port(struct intel_dp
*intel_dp
, bool state
)
2233 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
2234 struct drm_i915_private
*dev_priv
= to_i915(dig_port
->base
.base
.dev
);
2235 bool cur_state
= I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
;
2237 I915_STATE_WARN(cur_state
!= state
,
2238 "DP port %c state assertion failure (expected %s, current %s)\n",
2239 port_name(dig_port
->port
),
2240 state_string(state
), state_string(cur_state
));
2242 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2244 static void assert_edp_pll(struct drm_i915_private
*dev_priv
, bool state
)
2246 bool cur_state
= I915_READ(DP_A
) & DP_PLL_ENABLE
;
2248 I915_STATE_WARN(cur_state
!= state
,
2249 "eDP PLL state assertion failure (expected %s, current %s)\n",
2250 state_string(state
), state_string(cur_state
));
2252 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2253 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2255 static void ironlake_edp_pll_on(struct intel_dp
*intel_dp
)
2257 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2258 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
2259 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2261 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2262 assert_dp_port_disabled(intel_dp
);
2263 assert_edp_pll_disabled(dev_priv
);
2265 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2266 crtc
->config
->port_clock
);
2268 intel_dp
->DP
&= ~DP_PLL_FREQ_MASK
;
2270 if (crtc
->config
->port_clock
== 162000)
2271 intel_dp
->DP
|= DP_PLL_FREQ_162MHZ
;
2273 intel_dp
->DP
|= DP_PLL_FREQ_270MHZ
;
2275 I915_WRITE(DP_A
, intel_dp
->DP
);
2279 intel_dp
->DP
|= DP_PLL_ENABLE
;
2281 I915_WRITE(DP_A
, intel_dp
->DP
);
2286 static void ironlake_edp_pll_off(struct intel_dp
*intel_dp
)
2288 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2289 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
2290 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2292 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2293 assert_dp_port_disabled(intel_dp
);
2294 assert_edp_pll_enabled(dev_priv
);
2296 DRM_DEBUG_KMS("disabling eDP PLL\n");
2298 intel_dp
->DP
&= ~DP_PLL_ENABLE
;
2300 I915_WRITE(DP_A
, intel_dp
->DP
);
2305 /* If the sink supports it, try to set the power state appropriately */
2306 void intel_dp_sink_dpms(struct intel_dp
*intel_dp
, int mode
)
2310 /* Should have a valid DPCD by this point */
2311 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x11)
2314 if (mode
!= DRM_MODE_DPMS_ON
) {
2315 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2319 * When turning on, we need to retry for 1ms to give the sink
2322 for (i
= 0; i
< 3; i
++) {
2323 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2332 DRM_DEBUG_KMS("failed to %s sink power state\n",
2333 mode
== DRM_MODE_DPMS_ON
? "enable" : "disable");
2336 static bool intel_dp_get_hw_state(struct intel_encoder
*encoder
,
2339 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2340 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2341 struct drm_device
*dev
= encoder
->base
.dev
;
2342 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2343 enum intel_display_power_domain power_domain
;
2346 power_domain
= intel_display_port_power_domain(encoder
);
2347 if (!intel_display_power_is_enabled(dev_priv
, power_domain
))
2350 tmp
= I915_READ(intel_dp
->output_reg
);
2352 if (!(tmp
& DP_PORT_EN
))
2355 if (IS_GEN7(dev
) && port
== PORT_A
) {
2356 *pipe
= PORT_TO_PIPE_CPT(tmp
);
2357 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2360 for_each_pipe(dev_priv
, p
) {
2361 u32 trans_dp
= I915_READ(TRANS_DP_CTL(p
));
2362 if (TRANS_DP_PIPE_TO_PORT(trans_dp
) == port
) {
2368 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2369 i915_mmio_reg_offset(intel_dp
->output_reg
));
2370 } else if (IS_CHERRYVIEW(dev
)) {
2371 *pipe
= DP_PORT_TO_PIPE_CHV(tmp
);
2373 *pipe
= PORT_TO_PIPE(tmp
);
2379 static void intel_dp_get_config(struct intel_encoder
*encoder
,
2380 struct intel_crtc_state
*pipe_config
)
2382 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2384 struct drm_device
*dev
= encoder
->base
.dev
;
2385 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2386 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2387 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2390 tmp
= I915_READ(intel_dp
->output_reg
);
2392 pipe_config
->has_audio
= tmp
& DP_AUDIO_OUTPUT_ENABLE
&& port
!= PORT_A
;
2394 if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2395 u32 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
2397 if (trans_dp
& TRANS_DP_HSYNC_ACTIVE_HIGH
)
2398 flags
|= DRM_MODE_FLAG_PHSYNC
;
2400 flags
|= DRM_MODE_FLAG_NHSYNC
;
2402 if (trans_dp
& TRANS_DP_VSYNC_ACTIVE_HIGH
)
2403 flags
|= DRM_MODE_FLAG_PVSYNC
;
2405 flags
|= DRM_MODE_FLAG_NVSYNC
;
2407 if (tmp
& DP_SYNC_HS_HIGH
)
2408 flags
|= DRM_MODE_FLAG_PHSYNC
;
2410 flags
|= DRM_MODE_FLAG_NHSYNC
;
2412 if (tmp
& DP_SYNC_VS_HIGH
)
2413 flags
|= DRM_MODE_FLAG_PVSYNC
;
2415 flags
|= DRM_MODE_FLAG_NVSYNC
;
2418 pipe_config
->base
.adjusted_mode
.flags
|= flags
;
2420 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
2421 tmp
& DP_COLOR_RANGE_16_235
)
2422 pipe_config
->limited_color_range
= true;
2424 pipe_config
->has_dp_encoder
= true;
2426 pipe_config
->lane_count
=
2427 ((tmp
& DP_PORT_WIDTH_MASK
) >> DP_PORT_WIDTH_SHIFT
) + 1;
2429 intel_dp_get_m_n(crtc
, pipe_config
);
2431 if (port
== PORT_A
) {
2432 if ((I915_READ(DP_A
) & DP_PLL_FREQ_MASK
) == DP_PLL_FREQ_162MHZ
)
2433 pipe_config
->port_clock
= 162000;
2435 pipe_config
->port_clock
= 270000;
2438 dotclock
= intel_dotclock_calculate(pipe_config
->port_clock
,
2439 &pipe_config
->dp_m_n
);
2441 if (HAS_PCH_SPLIT(dev_priv
->dev
) && port
!= PORT_A
)
2442 ironlake_check_encoder_dotclock(pipe_config
, dotclock
);
2444 pipe_config
->base
.adjusted_mode
.crtc_clock
= dotclock
;
2446 if (is_edp(intel_dp
) && dev_priv
->vbt
.edp_bpp
&&
2447 pipe_config
->pipe_bpp
> dev_priv
->vbt
.edp_bpp
) {
2449 * This is a big fat ugly hack.
2451 * Some machines in UEFI boot mode provide us a VBT that has 18
2452 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2453 * unknown we fail to light up. Yet the same BIOS boots up with
2454 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2455 * max, not what it tells us to use.
2457 * Note: This will still be broken if the eDP panel is not lit
2458 * up by the BIOS, and thus we can't get the mode at module
2461 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2462 pipe_config
->pipe_bpp
, dev_priv
->vbt
.edp_bpp
);
2463 dev_priv
->vbt
.edp_bpp
= pipe_config
->pipe_bpp
;
/*
 * Disable the DP output: tear down audio and PSR, put the sink to
 * sleep and power the eDP panel down in the required order.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
/*
 * ILK+ post-disable: take the link down after the pipe is off, and
 * turn off the CPU eDP PLL when this was port A.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);

	/* Only ilk+ has port A */
	if (port == PORT_A)	/* guard reconstructed — confirm against upstream */
		ironlake_edp_pll_off(intel_dp);
}
/* VLV post-disable: the port is taken down after the pipe is disabled. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
/*
 * Assert (@reset == true) or deassert (@reset == false) the CHV PHY
 * data-lane and clock soft resets via sideband DPIO writes. Lanes 2/3
 * (the second PCS group) are only touched when more than two lanes are
 * in use.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* Lane reset bits are active low: clear to assert reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* Clock soft reset follows the same polarity, gated by REQ enable */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
/*
 * CHV post-disable: bring the link down, then assert the PHY data-lane
 * soft reset under the sideband lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * Program the requested DP training pattern (and scrambling disable)
 * into the hardware. On DDI platforms this writes DP_TP_CTL directly;
 * on older platforms it only updates the caller's port-register shadow
 * value *DP, which the caller must write out. Training pattern 3 is
 * only supported on DDI and CHV.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {	/* branch condition reconstructed — confirm */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style training bits in the port register */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Not supported here; fall back to pattern 2 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
/*
 * Enable the DP port with training pattern 1. Performs the VLV/CHV
 * two-step write: program the register first with the port still
 * disabled, then set DP_PORT_EN in a second write.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
/*
 * Enable the DP port and power up the (e)DP panel/sink, then run link
 * training and finally enable audio. Handles the ILK port A FIFO
 * underrun workaround and the VLV/CHV power sequencer setup.
 *
 * NOTE(review): several guard lines (early return, pps_lock, the
 * "port == PORT_A" conditions) were reconstructed from upstream —
 * confirm against the original file.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	/* The port must not already be enabled */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	/*
	 * We get an occasional spurious underrun between the port
	 * enable and vdd enable, when enabling port A eDP.
	 *
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
	 */
	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
/* g4x enable hook: enable the port, then turn the eDP backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
/*
 * VLV enable hook: the port itself was enabled in the pre_enable hook;
 * here only the backlight and PSR are turned on.
 */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}
/*
 * g4x/ILK pre-enable: program the port registers and, for ILK port A
 * eDP, suppress FIFO underrun reporting on the other pipe and turn the
 * eDP PLL on.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A)	/* guard reconstructed — confirm against upstream */
		ironlake_edp_pll_on(intel_dp);
}
/*
 * Disconnect this eDP port from the VLV/CHV per-pipe panel power
 * sequencer it currently uses: sync off vdd, clear the PPS port
 * select, and mark the port as having no PPS.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power seqeuencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power seqeuencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
/*
 * Take ownership of @pipe's panel power sequencer: walk all eDP
 * encoders and detach any that currently use it. Caller holds
 * pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only pipes A and B have a PPS on VLV/CHV */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
/*
 * Bind the panel power sequencer of this port's pipe to the port:
 * detach the previously-used PPS (if any), steal the target pipe's PPS
 * from other ports, then initialize it. Caller holds pps_mutex. No-op
 * for non-eDP ports or when the correct PPS is already attached.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
/*
 * VLV pre-enable: program the PHY clock channels and lane settings via
 * sideband, then enable the port.
 *
 * NOTE(review): the middle of this function (the computation of @val
 * written to VLV_PCS_DW8) was reconstructed from upstream — confirm.
 * The initial vlv_dpio_read() result is immediately overwritten by
 * "val = 0" upstream, i.e. it is a dead read.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
/*
 * VLV pre-PLL-enable: program the port registers, reset the Tx lanes
 * to their default state and fix up inter-pair skew, all via sideband.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
		       DPIO_PCS_TX_LANE2_RESET |
		       DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
		       DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * CHV pre-enable: configure the PHY (TX FIFO reset source, per-lane
 * latency/upar settings, data lane stagger), deassert the data-lane
 * reset and enable the port. Finally release the CL2 power-gate
 * override acquired in chv_dp_pre_pll_enable().
 *
 * NOTE(review): the stagger value table was reconstructed from
 * upstream — confirm the exact constants.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
			       data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
/*
 * CHV pre-PLL-enable: power up the PHY lanes (including the CL2 trick
 * for CH0/pipe B), assert the data-lane reset, and program left/right
 * clock distribution plus clock channel usage so the PLL can be
 * accessed and driven from the right channel.
 *
 * NOTE(review): the ch == DPIO_CH0/CH1 conditions inside the clock
 * distribution blocks were reconstructed from upstream — confirm.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This a a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * CHV post-PLL-disable: undo the left/right clock distribution forcing
 * and drop the lane power-gate overrides set in chv_dp_pre_pll_enable().
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 *
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
 * supposed to retry 3 times per the spec.
 *
 * Returns the number of bytes read on success (== @size), or the last
 * short/error return from drm_dp_dpcd_read() after 3 attempts.
 */
static ssize_t
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
			void *buffer, size_t size)
{
	ssize_t ret;
	int i;

	/*
	 * Sometime we just get the same incorrect byte repeated
	 * over the entire buffer. Doing just one throw away read
	 * initially seems to "solve" it.
	 */
	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);

	for (i = 0; i < 3; i++) {
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
		if (ret == size)
			return ret;
		msleep(1);	/* give the sink time to wake up */
	}

	return ret;
}
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information.
 *
 * Returns true iff the full DP_LINK_STATUS_SIZE block was read.
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
				       DP_LANE0_1_STATUS,
				       link_status,
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
3207 /* These are source-specific values. */
3209 intel_dp_voltage_max(struct intel_dp
*intel_dp
)
3211 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3212 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3213 enum port port
= dp_to_dig_port(intel_dp
)->port
;
3215 if (IS_BROXTON(dev
))
3216 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3217 else if (INTEL_INFO(dev
)->gen
>= 9) {
3218 if (dev_priv
->edp_low_vswing
&& port
== PORT_A
)
3219 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3220 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3221 } else if (IS_VALLEYVIEW(dev
))
3222 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3223 else if (IS_GEN7(dev
) && port
== PORT_A
)
3224 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3225 else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
)
3226 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3228 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
/*
 * Return the maximum pre-emphasis level (DP_TRAIN_PRE_EMPH_*) the
 * source supports for the given voltage swing, per platform/port.
 * Higher swing generally allows less pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
/*
 * Translate the sink-requested training level (swing + pre-emphasis in
 * train_set[0]) into VLV PHY register values and program them via
 * sideband. Returns 0; unsupported swing/pre-emphasis combinations
 * bail out early without touching the hardware.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
		       uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3399 static bool chv_need_uniq_trans_scale(uint8_t train_set
)
3401 return (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) == DP_TRAIN_PRE_EMPH_LEVEL_0
&&
3402 (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3405 static uint32_t chv_signal_levels(struct intel_dp
*intel_dp
)
3407 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3408 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3409 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3410 struct intel_crtc
*intel_crtc
= to_intel_crtc(dport
->base
.base
.crtc
);
3411 u32 deemph_reg_value
, margin_reg_value
, val
;
3412 uint8_t train_set
= intel_dp
->train_set
[0];
3413 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
3414 enum pipe pipe
= intel_crtc
->pipe
;
3417 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3418 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3419 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3420 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3421 deemph_reg_value
= 128;
3422 margin_reg_value
= 52;
3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3425 deemph_reg_value
= 128;
3426 margin_reg_value
= 77;
3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3429 deemph_reg_value
= 128;
3430 margin_reg_value
= 102;
3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3433 deemph_reg_value
= 128;
3434 margin_reg_value
= 154;
3435 /* FIXME extra to set for 1200 */
3441 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3442 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3444 deemph_reg_value
= 85;
3445 margin_reg_value
= 78;
3447 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3448 deemph_reg_value
= 85;
3449 margin_reg_value
= 116;
3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3452 deemph_reg_value
= 85;
3453 margin_reg_value
= 154;
3459 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3460 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3462 deemph_reg_value
= 64;
3463 margin_reg_value
= 104;
3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3466 deemph_reg_value
= 64;
3467 margin_reg_value
= 154;
3473 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3474 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3476 deemph_reg_value
= 43;
3477 margin_reg_value
= 154;
3487 mutex_lock(&dev_priv
->sb_lock
);
3489 /* Clear calc init */
3490 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3491 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3492 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3493 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3494 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3496 if (intel_crtc
->config
->lane_count
> 2) {
3497 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3498 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3499 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3500 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3501 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3504 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW9(ch
));
3505 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3506 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3507 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW9(ch
), val
);
3509 if (intel_crtc
->config
->lane_count
> 2) {
3510 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW9(ch
));
3511 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3512 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3513 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW9(ch
), val
);
3516 /* Program swing deemph */
3517 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3518 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
));
3519 val
&= ~DPIO_SWING_DEEMPH9P5_MASK
;
3520 val
|= deemph_reg_value
<< DPIO_SWING_DEEMPH9P5_SHIFT
;
3521 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
), val
);
3524 /* Program swing margin */
3525 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3526 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
));
3528 val
&= ~DPIO_SWING_MARGIN000_MASK
;
3529 val
|= margin_reg_value
<< DPIO_SWING_MARGIN000_SHIFT
;
3532 * Supposedly this value shouldn't matter when unique transition
3533 * scale is disabled, but in fact it does matter. Let's just
3534 * always program the same value and hope it's OK.
3536 val
&= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT
);
3537 val
|= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT
;
3539 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
), val
);
3543 * The document said it needs to set bit 27 for ch0 and bit 26
3544 * for ch1. Might be a typo in the doc.
3545 * For now, for this unique transition scale selection, set bit
3546 * 27 for ch0 and ch1.
3548 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3549 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
));
3550 if (chv_need_uniq_trans_scale(train_set
))
3551 val
|= DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3553 val
&= ~DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3554 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
), val
);
3557 /* Start swing calculation */
3558 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3559 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3560 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3562 if (intel_crtc
->config
->lane_count
> 2) {
3563 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3564 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3565 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3568 mutex_unlock(&dev_priv
->sb_lock
);
3574 gen4_signal_levels(uint8_t train_set
)
3576 uint32_t signal_levels
= 0;
3578 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3579 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3581 signal_levels
|= DP_VOLTAGE_0_4
;
3583 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3584 signal_levels
|= DP_VOLTAGE_0_6
;
3586 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3587 signal_levels
|= DP_VOLTAGE_0_8
;
3589 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3590 signal_levels
|= DP_VOLTAGE_1_2
;
3593 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3594 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3596 signal_levels
|= DP_PRE_EMPHASIS_0
;
3598 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3599 signal_levels
|= DP_PRE_EMPHASIS_3_5
;
3601 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3602 signal_levels
|= DP_PRE_EMPHASIS_6
;
3604 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3605 signal_levels
|= DP_PRE_EMPHASIS_9_5
;
3608 return signal_levels
;
3611 /* Gen6's DP voltage swing and pre-emphasis control */
3613 gen6_edp_signal_levels(uint8_t train_set
)
3615 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3616 DP_TRAIN_PRE_EMPHASIS_MASK
);
3617 switch (signal_levels
) {
3618 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3619 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3620 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3621 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3622 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B
;
3623 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3624 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3625 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B
;
3626 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3627 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3628 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B
;
3629 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3630 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3631 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B
;
3633 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3634 "0x%x\n", signal_levels
);
3635 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3639 /* Gen7's DP voltage swing and pre-emphasis control */
3641 gen7_edp_signal_levels(uint8_t train_set
)
3643 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3644 DP_TRAIN_PRE_EMPHASIS_MASK
);
3645 switch (signal_levels
) {
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3647 return EDP_LINK_TRAIN_400MV_0DB_IVB
;
3648 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3649 return EDP_LINK_TRAIN_400MV_3_5DB_IVB
;
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3651 return EDP_LINK_TRAIN_400MV_6DB_IVB
;
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3654 return EDP_LINK_TRAIN_600MV_0DB_IVB
;
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3656 return EDP_LINK_TRAIN_600MV_3_5DB_IVB
;
3658 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3659 return EDP_LINK_TRAIN_800MV_0DB_IVB
;
3660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3661 return EDP_LINK_TRAIN_800MV_3_5DB_IVB
;
3664 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3665 "0x%x\n", signal_levels
);
3666 return EDP_LINK_TRAIN_500MV_0DB_IVB
;
3671 intel_dp_set_signal_levels(struct intel_dp
*intel_dp
)
3673 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3674 enum port port
= intel_dig_port
->port
;
3675 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3676 struct drm_i915_private
*dev_priv
= to_i915(dev
);
3677 uint32_t signal_levels
, mask
= 0;
3678 uint8_t train_set
= intel_dp
->train_set
[0];
3681 signal_levels
= ddi_signal_levels(intel_dp
);
3683 if (IS_BROXTON(dev
))
3686 mask
= DDI_BUF_EMP_MASK
;
3687 } else if (IS_CHERRYVIEW(dev
)) {
3688 signal_levels
= chv_signal_levels(intel_dp
);
3689 } else if (IS_VALLEYVIEW(dev
)) {
3690 signal_levels
= vlv_signal_levels(intel_dp
);
3691 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3692 signal_levels
= gen7_edp_signal_levels(train_set
);
3693 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_IVB
;
3694 } else if (IS_GEN6(dev
) && port
== PORT_A
) {
3695 signal_levels
= gen6_edp_signal_levels(train_set
);
3696 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_SNB
;
3698 signal_levels
= gen4_signal_levels(train_set
);
3699 mask
= DP_VOLTAGE_MASK
| DP_PRE_EMPHASIS_MASK
;
3703 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels
);
3705 DRM_DEBUG_KMS("Using vswing level %d\n",
3706 train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
);
3707 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3708 (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) >>
3709 DP_TRAIN_PRE_EMPHASIS_SHIFT
);
3711 intel_dp
->DP
= (intel_dp
->DP
& ~mask
) | signal_levels
;
3713 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3714 POSTING_READ(intel_dp
->output_reg
);
3718 intel_dp_program_link_training_pattern(struct intel_dp
*intel_dp
,
3719 uint8_t dp_train_pat
)
3721 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3722 struct drm_i915_private
*dev_priv
=
3723 to_i915(intel_dig_port
->base
.base
.dev
);
3725 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
, dp_train_pat
);
3727 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3728 POSTING_READ(intel_dp
->output_reg
);
3731 void intel_dp_set_idle_link_train(struct intel_dp
*intel_dp
)
3733 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3734 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3735 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3736 enum port port
= intel_dig_port
->port
;
3742 val
= I915_READ(DP_TP_CTL(port
));
3743 val
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3744 val
|= DP_TP_CTL_LINK_TRAIN_IDLE
;
3745 I915_WRITE(DP_TP_CTL(port
), val
);
3748 * On PORT_A we can have only eDP in SST mode. There the only reason
3749 * we need to set idle transmission mode is to work around a HW issue
3750 * where we enable the pipe while not in idle link-training mode.
3751 * In this case there is requirement to wait for a minimum number of
3752 * idle patterns to be sent.
3757 if (wait_for((I915_READ(DP_TP_STATUS(port
)) & DP_TP_STATUS_IDLE_DONE
),
3759 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3763 intel_dp_link_down(struct intel_dp
*intel_dp
)
3765 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3766 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
3767 enum port port
= intel_dig_port
->port
;
3768 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3769 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3770 uint32_t DP
= intel_dp
->DP
;
3772 if (WARN_ON(HAS_DDI(dev
)))
3775 if (WARN_ON((I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
) == 0))
3778 DRM_DEBUG_KMS("\n");
3780 if ((IS_GEN7(dev
) && port
== PORT_A
) ||
3781 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
3782 DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3783 DP
|= DP_LINK_TRAIN_PAT_IDLE_CPT
;
3785 if (IS_CHERRYVIEW(dev
))
3786 DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
3788 DP
&= ~DP_LINK_TRAIN_MASK
;
3789 DP
|= DP_LINK_TRAIN_PAT_IDLE
;
3791 I915_WRITE(intel_dp
->output_reg
, DP
);
3792 POSTING_READ(intel_dp
->output_reg
);
3794 DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
3795 I915_WRITE(intel_dp
->output_reg
, DP
);
3796 POSTING_READ(intel_dp
->output_reg
);
3799 * HW workaround for IBX, we need to move the port
3800 * to transcoder A after disabling it to allow the
3801 * matching HDMI port to be enabled on transcoder A.
3803 if (HAS_PCH_IBX(dev
) && crtc
->pipe
== PIPE_B
&& port
!= PORT_A
) {
3805 * We get CPU/PCH FIFO underruns on the other pipe when
3806 * doing the workaround. Sweep them under the rug.
3808 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3809 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3811 /* always enable with pattern 1 (as per spec) */
3812 DP
&= ~(DP_PIPEB_SELECT
| DP_LINK_TRAIN_MASK
);
3813 DP
|= DP_PORT_EN
| DP_LINK_TRAIN_PAT_1
;
3814 I915_WRITE(intel_dp
->output_reg
, DP
);
3815 POSTING_READ(intel_dp
->output_reg
);
3818 I915_WRITE(intel_dp
->output_reg
, DP
);
3819 POSTING_READ(intel_dp
->output_reg
);
3821 intel_wait_for_vblank_if_active(dev_priv
->dev
, PIPE_A
);
3822 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
3823 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
3826 msleep(intel_dp
->panel_power_down_delay
);
3832 intel_dp_get_dpcd(struct intel_dp
*intel_dp
)
3834 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3835 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3836 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3839 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, 0x000, intel_dp
->dpcd
,
3840 sizeof(intel_dp
->dpcd
)) < 0)
3841 return false; /* aux transfer failed */
3843 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp
->dpcd
), intel_dp
->dpcd
);
3845 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0)
3846 return false; /* DPCD not present */
3848 /* Check if the panel supports PSR */
3849 memset(intel_dp
->psr_dpcd
, 0, sizeof(intel_dp
->psr_dpcd
));
3850 if (is_edp(intel_dp
)) {
3851 intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_PSR_SUPPORT
,
3853 sizeof(intel_dp
->psr_dpcd
));
3854 if (intel_dp
->psr_dpcd
[0] & DP_PSR_IS_SUPPORTED
) {
3855 dev_priv
->psr
.sink_support
= true;
3856 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3859 if (INTEL_INFO(dev
)->gen
>= 9 &&
3860 (intel_dp
->psr_dpcd
[0] & DP_PSR2_IS_SUPPORTED
)) {
3861 uint8_t frame_sync_cap
;
3863 dev_priv
->psr
.sink_support
= true;
3864 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3865 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP
,
3866 &frame_sync_cap
, 1);
3867 dev_priv
->psr
.aux_frame_sync
= frame_sync_cap
? true : false;
3868 /* PSR2 needs frame sync as well */
3869 dev_priv
->psr
.psr2_support
= dev_priv
->psr
.aux_frame_sync
;
3870 DRM_DEBUG_KMS("PSR2 %s on sink",
3871 dev_priv
->psr
.psr2_support
? "supported" : "not supported");
3875 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3876 yesno(intel_dp_source_supports_hbr2(intel_dp
)),
3877 yesno(drm_dp_tps3_supported(intel_dp
->dpcd
)));
3879 /* Intermediate frequency support */
3880 if (is_edp(intel_dp
) &&
3881 (intel_dp
->dpcd
[DP_EDP_CONFIGURATION_CAP
] & DP_DPCD_DISPLAY_CONTROL_CAPABLE
) &&
3882 (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_EDP_DPCD_REV
, &rev
, 1) == 1) &&
3883 (rev
>= 0x03)) { /* eDp v1.4 or higher */
3884 __le16 sink_rates
[DP_MAX_SUPPORTED_RATES
];
3887 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3888 DP_SUPPORTED_LINK_RATES
,
3890 sizeof(sink_rates
));
3892 for (i
= 0; i
< ARRAY_SIZE(sink_rates
); i
++) {
3893 int val
= le16_to_cpu(sink_rates
[i
]);
3898 /* Value read is in kHz while drm clock is saved in deca-kHz */
3899 intel_dp
->sink_rates
[i
] = (val
* 200) / 10;
3901 intel_dp
->num_sink_rates
= i
;
3904 intel_dp_print_rates(intel_dp
);
3906 if (!(intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
3907 DP_DWN_STRM_PORT_PRESENT
))
3908 return true; /* native DP sink */
3910 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0x10)
3911 return true; /* no per-port downstream info */
3913 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_DOWNSTREAM_PORT_0
,
3914 intel_dp
->downstream_ports
,
3915 DP_MAX_DOWNSTREAM_PORTS
) < 0)
3916 return false; /* downstream port status fetch failed */
3922 intel_dp_probe_oui(struct intel_dp
*intel_dp
)
3926 if (!(intel_dp
->dpcd
[DP_DOWN_STREAM_PORT_COUNT
] & DP_OUI_SUPPORT
))
3929 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_OUI
, buf
, 3) == 3)
3930 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3931 buf
[0], buf
[1], buf
[2]);
3933 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_BRANCH_OUI
, buf
, 3) == 3)
3934 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3935 buf
[0], buf
[1], buf
[2]);
3939 intel_dp_probe_mst(struct intel_dp
*intel_dp
)
3943 if (!intel_dp
->can_mst
)
3946 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x12)
3949 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_MSTM_CAP
, buf
, 1)) {
3950 if (buf
[0] & DP_MST_CAP
) {
3951 DRM_DEBUG_KMS("Sink is MST capable\n");
3952 intel_dp
->is_mst
= true;
3954 DRM_DEBUG_KMS("Sink is not MST capable\n");
3955 intel_dp
->is_mst
= false;
3959 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
3960 return intel_dp
->is_mst
;
3963 static int intel_dp_sink_crc_stop(struct intel_dp
*intel_dp
)
3965 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3966 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3967 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
3973 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0) {
3974 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3979 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
3980 buf
& ~DP_TEST_SINK_START
) < 0) {
3981 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3987 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
3989 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
3990 DP_TEST_SINK_MISC
, &buf
) < 0) {
3994 count
= buf
& DP_TEST_COUNT_MASK
;
3995 } while (--attempts
&& count
);
3997 if (attempts
== 0) {
3998 DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
4003 hsw_enable_ips(intel_crtc
);
4007 static int intel_dp_sink_crc_start(struct intel_dp
*intel_dp
)
4009 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4010 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
4011 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4015 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0)
4018 if (!(buf
& DP_TEST_CRC_SUPPORTED
))
4021 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0)
4024 if (buf
& DP_TEST_SINK_START
) {
4025 ret
= intel_dp_sink_crc_stop(intel_dp
);
4030 hsw_disable_ips(intel_crtc
);
4032 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4033 buf
| DP_TEST_SINK_START
) < 0) {
4034 hsw_enable_ips(intel_crtc
);
4038 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4042 int intel_dp_sink_crc(struct intel_dp
*intel_dp
, u8
*crc
)
4044 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4045 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
4046 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4051 ret
= intel_dp_sink_crc_start(intel_dp
);
4056 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4058 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
4059 DP_TEST_SINK_MISC
, &buf
) < 0) {
4063 count
= buf
& DP_TEST_COUNT_MASK
;
4065 } while (--attempts
&& count
== 0);
4067 if (attempts
== 0) {
4068 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4073 if (drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_CRC_R_CR
, crc
, 6) < 0) {
4079 intel_dp_sink_crc_stop(intel_dp
);
4084 intel_dp_get_sink_irq(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4086 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4087 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4088 sink_irq_vector
, 1) == 1;
4092 intel_dp_get_sink_irq_esi(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4096 ret
= intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4098 sink_irq_vector
, 14);
4105 static uint8_t intel_dp_autotest_link_training(struct intel_dp
*intel_dp
)
4107 uint8_t test_result
= DP_TEST_ACK
;
4111 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp
*intel_dp
)
4113 uint8_t test_result
= DP_TEST_NAK
;
4117 static uint8_t intel_dp_autotest_edid(struct intel_dp
*intel_dp
)
4119 uint8_t test_result
= DP_TEST_NAK
;
4120 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4121 struct drm_connector
*connector
= &intel_connector
->base
;
4123 if (intel_connector
->detect_edid
== NULL
||
4124 connector
->edid_corrupt
||
4125 intel_dp
->aux
.i2c_defer_count
> 6) {
4126 /* Check EDID read for NACKs, DEFERs and corruption
4127 * (DP CTS 1.2 Core r1.1)
4128 * 4.2.2.4 : Failed EDID read, I2C_NAK
4129 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4130 * 4.2.2.6 : EDID corruption detected
4131 * Use failsafe mode for all cases
4133 if (intel_dp
->aux
.i2c_nack_count
> 0 ||
4134 intel_dp
->aux
.i2c_defer_count
> 0)
4135 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4136 intel_dp
->aux
.i2c_nack_count
,
4137 intel_dp
->aux
.i2c_defer_count
);
4138 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_FAILSAFE
;
4140 struct edid
*block
= intel_connector
->detect_edid
;
4142 /* We have to write the checksum
4143 * of the last block read
4145 block
+= intel_connector
->detect_edid
->extensions
;
4147 if (!drm_dp_dpcd_write(&intel_dp
->aux
,
4148 DP_TEST_EDID_CHECKSUM
,
4151 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4153 test_result
= DP_TEST_ACK
| DP_TEST_EDID_CHECKSUM_WRITE
;
4154 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_STANDARD
;
4157 /* Set test active flag here so userspace doesn't interrupt things */
4158 intel_dp
->compliance_test_active
= 1;
4163 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp
*intel_dp
)
4165 uint8_t test_result
= DP_TEST_NAK
;
4169 static void intel_dp_handle_test_request(struct intel_dp
*intel_dp
)
4171 uint8_t response
= DP_TEST_NAK
;
4175 status
= drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_REQUEST
, &rxdata
, 1);
4177 DRM_DEBUG_KMS("Could not read test request from sink\n");
4182 case DP_TEST_LINK_TRAINING
:
4183 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4184 intel_dp
->compliance_test_type
= DP_TEST_LINK_TRAINING
;
4185 response
= intel_dp_autotest_link_training(intel_dp
);
4187 case DP_TEST_LINK_VIDEO_PATTERN
:
4188 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4189 intel_dp
->compliance_test_type
= DP_TEST_LINK_VIDEO_PATTERN
;
4190 response
= intel_dp_autotest_video_pattern(intel_dp
);
4192 case DP_TEST_LINK_EDID_READ
:
4193 DRM_DEBUG_KMS("EDID test requested\n");
4194 intel_dp
->compliance_test_type
= DP_TEST_LINK_EDID_READ
;
4195 response
= intel_dp_autotest_edid(intel_dp
);
4197 case DP_TEST_LINK_PHY_TEST_PATTERN
:
4198 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4199 intel_dp
->compliance_test_type
= DP_TEST_LINK_PHY_TEST_PATTERN
;
4200 response
= intel_dp_autotest_phy_pattern(intel_dp
);
4203 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata
);
4208 status
= drm_dp_dpcd_write(&intel_dp
->aux
,
4212 DRM_DEBUG_KMS("Could not write test response to sink\n");
4216 intel_dp_check_mst_status(struct intel_dp
*intel_dp
)
4220 if (intel_dp
->is_mst
) {
4225 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4229 /* check link status - esi[10] = 0x200c */
4230 if (intel_dp
->active_mst_links
&&
4231 !drm_dp_channel_eq_ok(&esi
[10], intel_dp
->lane_count
)) {
4232 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4233 intel_dp_start_link_train(intel_dp
);
4234 intel_dp_stop_link_train(intel_dp
);
4237 DRM_DEBUG_KMS("got esi %3ph\n", esi
);
4238 ret
= drm_dp_mst_hpd_irq(&intel_dp
->mst_mgr
, esi
, &handled
);
4241 for (retry
= 0; retry
< 3; retry
++) {
4243 wret
= drm_dp_dpcd_write(&intel_dp
->aux
,
4244 DP_SINK_COUNT_ESI
+1,
4251 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4253 DRM_DEBUG_KMS("got esi2 %3ph\n", esi
);
4261 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4262 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4263 intel_dp
->is_mst
= false;
4264 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4265 /* send a hotplug event */
4266 drm_kms_helper_hotplug_event(intel_dig_port
->base
.base
.dev
);
4273 * According to DP spec
4276 * 2. Configure link according to Receiver Capabilities
4277 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4278 * 4. Check link status on receipt of hot-plug interrupt
4281 intel_dp_check_link_status(struct intel_dp
*intel_dp
)
4283 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4284 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4286 u8 link_status
[DP_LINK_STATUS_SIZE
];
4288 WARN_ON(!drm_modeset_is_locked(&dev
->mode_config
.connection_mutex
));
4291 * Clearing compliance test variables to allow capturing
4292 * of values for next automated test request.
4294 intel_dp
->compliance_test_active
= 0;
4295 intel_dp
->compliance_test_type
= 0;
4296 intel_dp
->compliance_test_data
= 0;
4298 if (!intel_encoder
->base
.crtc
)
4301 if (!to_intel_crtc(intel_encoder
->base
.crtc
)->active
)
4304 /* Try to read receiver status if the link appears to be up */
4305 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
4309 /* Now read the DPCD to see if it's actually running */
4310 if (!intel_dp_get_dpcd(intel_dp
)) {
4314 /* Try to read the source of the interrupt */
4315 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4316 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4317 /* Clear interrupt source */
4318 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4319 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4322 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4323 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4324 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4325 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4328 /* if link training is requested we should perform it always */
4329 if ((intel_dp
->compliance_test_type
== DP_TEST_LINK_TRAINING
) ||
4330 (!drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
))) {
4331 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4332 intel_encoder
->base
.name
);
4333 intel_dp_start_link_train(intel_dp
);
4334 intel_dp_stop_link_train(intel_dp
);
4338 /* XXX this is probably wrong for multiple downstream ports */
4339 static enum drm_connector_status
4340 intel_dp_detect_dpcd(struct intel_dp
*intel_dp
)
4342 uint8_t *dpcd
= intel_dp
->dpcd
;
4345 if (!intel_dp_get_dpcd(intel_dp
))
4346 return connector_status_disconnected
;
4348 /* if there's no downstream port, we're done */
4349 if (!(dpcd
[DP_DOWNSTREAMPORT_PRESENT
] & DP_DWN_STRM_PORT_PRESENT
))
4350 return connector_status_connected
;
4352 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4353 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4354 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
) {
4357 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_COUNT
,
4359 return connector_status_unknown
;
4361 return DP_GET_SINK_COUNT(reg
) ? connector_status_connected
4362 : connector_status_disconnected
;
4365 /* If no HPD, poke DDC gently */
4366 if (drm_probe_ddc(&intel_dp
->aux
.ddc
))
4367 return connector_status_connected
;
4369 /* Well we tried, say unknown for unreliable port types */
4370 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11) {
4371 type
= intel_dp
->downstream_ports
[0] & DP_DS_PORT_TYPE_MASK
;
4372 if (type
== DP_DS_PORT_TYPE_VGA
||
4373 type
== DP_DS_PORT_TYPE_NON_EDID
)
4374 return connector_status_unknown
;
4376 type
= intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
4377 DP_DWN_STRM_PORT_TYPE_MASK
;
4378 if (type
== DP_DWN_STRM_PORT_TYPE_ANALOG
||
4379 type
== DP_DWN_STRM_PORT_TYPE_OTHER
)
4380 return connector_status_unknown
;
4383 /* Anything else is out of spec, warn and ignore */
4384 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4385 return connector_status_disconnected
;
4388 static enum drm_connector_status
4389 edp_detect(struct intel_dp
*intel_dp
)
4391 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4392 enum drm_connector_status status
;
4394 status
= intel_panel_detect(dev
);
4395 if (status
== connector_status_unknown
)
4396 status
= connector_status_connected
;
4401 static bool ibx_digital_port_connected(struct drm_i915_private
*dev_priv
,
4402 struct intel_digital_port
*port
)
4406 switch (port
->port
) {
4410 bit
= SDE_PORTB_HOTPLUG
;
4413 bit
= SDE_PORTC_HOTPLUG
;
4416 bit
= SDE_PORTD_HOTPLUG
;
4419 MISSING_CASE(port
->port
);
4423 return I915_READ(SDEISR
) & bit
;
4426 static bool cpt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4427 struct intel_digital_port
*port
)
4431 switch (port
->port
) {
4435 bit
= SDE_PORTB_HOTPLUG_CPT
;
4438 bit
= SDE_PORTC_HOTPLUG_CPT
;
4441 bit
= SDE_PORTD_HOTPLUG_CPT
;
4444 bit
= SDE_PORTE_HOTPLUG_SPT
;
4447 MISSING_CASE(port
->port
);
4451 return I915_READ(SDEISR
) & bit
;
4454 static bool g4x_digital_port_connected(struct drm_i915_private
*dev_priv
,
4455 struct intel_digital_port
*port
)
4459 switch (port
->port
) {
4461 bit
= PORTB_HOTPLUG_LIVE_STATUS_G4X
;
4464 bit
= PORTC_HOTPLUG_LIVE_STATUS_G4X
;
4467 bit
= PORTD_HOTPLUG_LIVE_STATUS_G4X
;
4470 MISSING_CASE(port
->port
);
4474 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4477 static bool vlv_digital_port_connected(struct drm_i915_private
*dev_priv
,
4478 struct intel_digital_port
*port
)
4482 switch (port
->port
) {
4484 bit
= PORTB_HOTPLUG_LIVE_STATUS_VLV
;
4487 bit
= PORTC_HOTPLUG_LIVE_STATUS_VLV
;
4490 bit
= PORTD_HOTPLUG_LIVE_STATUS_VLV
;
4493 MISSING_CASE(port
->port
);
4497 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4500 static bool bxt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4501 struct intel_digital_port
*intel_dig_port
)
4503 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4507 intel_hpd_pin_to_port(intel_encoder
->hpd_pin
, &port
);
4510 bit
= BXT_DE_PORT_HP_DDIA
;
4513 bit
= BXT_DE_PORT_HP_DDIB
;
4516 bit
= BXT_DE_PORT_HP_DDIC
;
4523 return I915_READ(GEN8_DE_PORT_ISR
) & bit
;
4527 * intel_digital_port_connected - is the specified port connected?
4528 * @dev_priv: i915 private structure
4529 * @port: the port to test
4531 * Return %true if @port is connected, %false otherwise.
4533 bool intel_digital_port_connected(struct drm_i915_private
*dev_priv
,
4534 struct intel_digital_port
*port
)
4536 if (HAS_PCH_IBX(dev_priv
))
4537 return ibx_digital_port_connected(dev_priv
, port
);
4538 if (HAS_PCH_SPLIT(dev_priv
))
4539 return cpt_digital_port_connected(dev_priv
, port
);
4540 else if (IS_BROXTON(dev_priv
))
4541 return bxt_digital_port_connected(dev_priv
, port
);
4542 else if (IS_VALLEYVIEW(dev_priv
))
4543 return vlv_digital_port_connected(dev_priv
, port
);
4545 return g4x_digital_port_connected(dev_priv
, port
);
4548 static struct edid
*
4549 intel_dp_get_edid(struct intel_dp
*intel_dp
)
4551 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4553 /* use cached edid if we have one */
4554 if (intel_connector
->edid
) {
4556 if (IS_ERR(intel_connector
->edid
))
4559 return drm_edid_duplicate(intel_connector
->edid
);
4561 return drm_get_edid(&intel_connector
->base
,
4562 &intel_dp
->aux
.ddc
);
4566 intel_dp_set_edid(struct intel_dp
*intel_dp
)
4568 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4571 edid
= intel_dp_get_edid(intel_dp
);
4572 intel_connector
->detect_edid
= edid
;
4574 if (intel_dp
->force_audio
!= HDMI_AUDIO_AUTO
)
4575 intel_dp
->has_audio
= intel_dp
->force_audio
== HDMI_AUDIO_ON
;
4577 intel_dp
->has_audio
= drm_detect_monitor_audio(edid
);
4581 intel_dp_unset_edid(struct intel_dp
*intel_dp
)
4583 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4585 kfree(intel_connector
->detect_edid
);
4586 intel_connector
->detect_edid
= NULL
;
4588 intel_dp
->has_audio
= false;
4591 static enum drm_connector_status
4592 intel_dp_detect(struct drm_connector
*connector
, bool force
)
4594 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4595 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4596 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4597 struct drm_device
*dev
= connector
->dev
;
4598 enum drm_connector_status status
;
4599 enum intel_display_power_domain power_domain
;
4603 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4604 connector
->base
.id
, connector
->name
);
4605 intel_dp_unset_edid(intel_dp
);
4607 if (intel_dp
->is_mst
) {
4608 /* MST devices are disconnected from a monitor POV */
4609 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4610 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4611 return connector_status_disconnected
;
4614 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
4615 intel_display_power_get(to_i915(dev
), power_domain
);
4617 /* Can't disconnect eDP, but you can close the lid... */
4618 if (is_edp(intel_dp
))
4619 status
= edp_detect(intel_dp
);
4620 else if (intel_digital_port_connected(to_i915(dev
),
4621 dp_to_dig_port(intel_dp
)))
4622 status
= intel_dp_detect_dpcd(intel_dp
);
4624 status
= connector_status_disconnected
;
4626 if (status
!= connector_status_connected
) {
4627 intel_dp
->compliance_test_active
= 0;
4628 intel_dp
->compliance_test_type
= 0;
4629 intel_dp
->compliance_test_data
= 0;
4634 intel_dp_probe_oui(intel_dp
);
4636 ret
= intel_dp_probe_mst(intel_dp
);
4638 /* if we are in MST mode then this connector
4639 won't appear connected or have anything with EDID on it */
4640 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4641 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4642 status
= connector_status_disconnected
;
4647 * Clearing NACK and defer counts to get their exact values
4648 * while reading EDID which are required by Compliance tests
4649 * 4.2.2.4 and 4.2.2.5
4651 intel_dp
->aux
.i2c_nack_count
= 0;
4652 intel_dp
->aux
.i2c_defer_count
= 0;
4654 intel_dp_set_edid(intel_dp
);
4656 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4657 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4658 status
= connector_status_connected
;
4660 /* Try to read the source of the interrupt */
4661 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4662 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4663 /* Clear interrupt source */
4664 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4665 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4668 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4669 intel_dp_handle_test_request(intel_dp
);
4670 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4671 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4675 intel_display_power_put(to_i915(dev
), power_domain
);
4680 intel_dp_force(struct drm_connector
*connector
)
4682 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4683 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4684 struct drm_i915_private
*dev_priv
= to_i915(intel_encoder
->base
.dev
);
4685 enum intel_display_power_domain power_domain
;
4687 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4688 connector
->base
.id
, connector
->name
);
4689 intel_dp_unset_edid(intel_dp
);
4691 if (connector
->status
!= connector_status_connected
)
4694 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
4695 intel_display_power_get(dev_priv
, power_domain
);
4697 intel_dp_set_edid(intel_dp
);
4699 intel_display_power_put(dev_priv
, power_domain
);
4701 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4702 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4705 static int intel_dp_get_modes(struct drm_connector
*connector
)
4707 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4710 edid
= intel_connector
->detect_edid
;
4712 int ret
= intel_connector_update_modes(connector
, edid
);
4717 /* if eDP has no EDID, fall back to fixed mode */
4718 if (is_edp(intel_attached_dp(connector
)) &&
4719 intel_connector
->panel
.fixed_mode
) {
4720 struct drm_display_mode
*mode
;
4722 mode
= drm_mode_duplicate(connector
->dev
,
4723 intel_connector
->panel
.fixed_mode
);
4725 drm_mode_probed_add(connector
, mode
);
4734 intel_dp_detect_audio(struct drm_connector
*connector
)
4736 bool has_audio
= false;
4739 edid
= to_intel_connector(connector
)->detect_edid
;
4741 has_audio
= drm_detect_monitor_audio(edid
);
4747 intel_dp_set_property(struct drm_connector
*connector
,
4748 struct drm_property
*property
,
4751 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
4752 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4753 struct intel_encoder
*intel_encoder
= intel_attached_encoder(connector
);
4754 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4757 ret
= drm_object_property_set_value(&connector
->base
, property
, val
);
4761 if (property
== dev_priv
->force_audio_property
) {
4765 if (i
== intel_dp
->force_audio
)
4768 intel_dp
->force_audio
= i
;
4770 if (i
== HDMI_AUDIO_AUTO
)
4771 has_audio
= intel_dp_detect_audio(connector
);
4773 has_audio
= (i
== HDMI_AUDIO_ON
);
4775 if (has_audio
== intel_dp
->has_audio
)
4778 intel_dp
->has_audio
= has_audio
;
4782 if (property
== dev_priv
->broadcast_rgb_property
) {
4783 bool old_auto
= intel_dp
->color_range_auto
;
4784 bool old_range
= intel_dp
->limited_color_range
;
4787 case INTEL_BROADCAST_RGB_AUTO
:
4788 intel_dp
->color_range_auto
= true;
4790 case INTEL_BROADCAST_RGB_FULL
:
4791 intel_dp
->color_range_auto
= false;
4792 intel_dp
->limited_color_range
= false;
4794 case INTEL_BROADCAST_RGB_LIMITED
:
4795 intel_dp
->color_range_auto
= false;
4796 intel_dp
->limited_color_range
= true;
4802 if (old_auto
== intel_dp
->color_range_auto
&&
4803 old_range
== intel_dp
->limited_color_range
)
4809 if (is_edp(intel_dp
) &&
4810 property
== connector
->dev
->mode_config
.scaling_mode_property
) {
4811 if (val
== DRM_MODE_SCALE_NONE
) {
4812 DRM_DEBUG_KMS("no scaling not supported\n");
4816 if (intel_connector
->panel
.fitting_mode
== val
) {
4817 /* the eDP scaling property is not changed */
4820 intel_connector
->panel
.fitting_mode
= val
;
4828 if (intel_encoder
->base
.crtc
)
4829 intel_crtc_restore_mode(intel_encoder
->base
.crtc
);
4835 intel_dp_connector_destroy(struct drm_connector
*connector
)
4837 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4839 kfree(intel_connector
->detect_edid
);
4841 if (!IS_ERR_OR_NULL(intel_connector
->edid
))
4842 kfree(intel_connector
->edid
);
4844 /* Can't call is_edp() since the encoder may have been destroyed
4846 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4847 intel_panel_fini(&intel_connector
->panel
);
4849 drm_connector_cleanup(connector
);
4853 void intel_dp_encoder_destroy(struct drm_encoder
*encoder
)
4855 struct intel_digital_port
*intel_dig_port
= enc_to_dig_port(encoder
);
4856 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4858 intel_dp_aux_fini(intel_dp
);
4859 intel_dp_mst_encoder_cleanup(intel_dig_port
);
4860 if (is_edp(intel_dp
)) {
4861 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4863 * vdd might still be enabled do to the delayed vdd off.
4864 * Make sure vdd is actually turned off here.
4867 edp_panel_vdd_off_sync(intel_dp
);
4868 pps_unlock(intel_dp
);
4870 if (intel_dp
->edp_notifier
.notifier_call
) {
4871 unregister_reboot_notifier(&intel_dp
->edp_notifier
);
4872 intel_dp
->edp_notifier
.notifier_call
= NULL
;
4875 drm_encoder_cleanup(encoder
);
4876 kfree(intel_dig_port
);
4879 static void intel_dp_encoder_suspend(struct intel_encoder
*intel_encoder
)
4881 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4883 if (!is_edp(intel_dp
))
4887 * vdd might still be enabled do to the delayed vdd off.
4888 * Make sure vdd is actually turned off here.
4890 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4892 edp_panel_vdd_off_sync(intel_dp
);
4893 pps_unlock(intel_dp
);
4896 static void intel_edp_panel_vdd_sanitize(struct intel_dp
*intel_dp
)
4898 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4899 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4900 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4901 enum intel_display_power_domain power_domain
;
4903 lockdep_assert_held(&dev_priv
->pps_mutex
);
4905 if (!edp_have_panel_vdd(intel_dp
))
4909 * The VDD bit needs a power domain reference, so if the bit is
4910 * already enabled when we boot or resume, grab this reference and
4911 * schedule a vdd off, so we don't hold on to the reference
4914 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4915 power_domain
= intel_display_port_aux_power_domain(&intel_dig_port
->base
);
4916 intel_display_power_get(dev_priv
, power_domain
);
4918 edp_panel_vdd_schedule_off(intel_dp
);
4921 static void intel_dp_encoder_reset(struct drm_encoder
*encoder
)
4923 struct intel_dp
*intel_dp
;
4925 if (to_intel_encoder(encoder
)->type
!= INTEL_OUTPUT_EDP
)
4928 intel_dp
= enc_to_intel_dp(encoder
);
4933 * Read out the current power sequencer assignment,
4934 * in case the BIOS did something with it.
4936 if (IS_VALLEYVIEW(encoder
->dev
))
4937 vlv_initial_power_sequencer_setup(intel_dp
);
4939 intel_edp_panel_vdd_sanitize(intel_dp
);
4941 pps_unlock(intel_dp
);
4944 static const struct drm_connector_funcs intel_dp_connector_funcs
= {
4945 .dpms
= drm_atomic_helper_connector_dpms
,
4946 .detect
= intel_dp_detect
,
4947 .force
= intel_dp_force
,
4948 .fill_modes
= drm_helper_probe_single_connector_modes
,
4949 .set_property
= intel_dp_set_property
,
4950 .atomic_get_property
= intel_connector_atomic_get_property
,
4951 .destroy
= intel_dp_connector_destroy
,
4952 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4953 .atomic_duplicate_state
= drm_atomic_helper_connector_duplicate_state
,
4956 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs
= {
4957 .get_modes
= intel_dp_get_modes
,
4958 .mode_valid
= intel_dp_mode_valid
,
4959 .best_encoder
= intel_best_encoder
,
4962 static const struct drm_encoder_funcs intel_dp_enc_funcs
= {
4963 .reset
= intel_dp_encoder_reset
,
4964 .destroy
= intel_dp_encoder_destroy
,
4968 intel_dp_hpd_pulse(struct intel_digital_port
*intel_dig_port
, bool long_hpd
)
4970 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4971 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4972 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4973 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4974 enum intel_display_power_domain power_domain
;
4975 enum irqreturn ret
= IRQ_NONE
;
4977 if (intel_dig_port
->base
.type
!= INTEL_OUTPUT_EDP
)
4978 intel_dig_port
->base
.type
= INTEL_OUTPUT_DISPLAYPORT
;
4980 if (long_hpd
&& intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
) {
4982 * vdd off can generate a long pulse on eDP which
4983 * would require vdd on to handle it, and thus we
4984 * would end up in an endless cycle of
4985 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4987 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4988 port_name(intel_dig_port
->port
));
4992 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4993 port_name(intel_dig_port
->port
),
4994 long_hpd
? "long" : "short");
4996 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
4997 intel_display_power_get(dev_priv
, power_domain
);
5000 /* indicate that we need to restart link training */
5001 intel_dp
->train_set_valid
= false;
5003 if (!intel_digital_port_connected(dev_priv
, intel_dig_port
))
5006 if (!intel_dp_get_dpcd(intel_dp
)) {
5010 intel_dp_probe_oui(intel_dp
);
5012 if (!intel_dp_probe_mst(intel_dp
)) {
5013 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
5014 intel_dp_check_link_status(intel_dp
);
5015 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
5019 if (intel_dp
->is_mst
) {
5020 if (intel_dp_check_mst_status(intel_dp
) == -EINVAL
)
5024 if (!intel_dp
->is_mst
) {
5025 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
5026 intel_dp_check_link_status(intel_dp
);
5027 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
5035 /* if we were in MST mode, and device is not there get out of MST mode */
5036 if (intel_dp
->is_mst
) {
5037 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp
->is_mst
, intel_dp
->mst_mgr
.mst_state
);
5038 intel_dp
->is_mst
= false;
5039 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
5042 intel_display_power_put(dev_priv
, power_domain
);
5047 /* check the VBT to see whether the eDP is on another port */
5048 bool intel_dp_is_edp(struct drm_device
*dev
, enum port port
)
5050 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5051 union child_device_config
*p_child
;
5053 static const short port_mapping
[] = {
5054 [PORT_B
] = DVO_PORT_DPB
,
5055 [PORT_C
] = DVO_PORT_DPC
,
5056 [PORT_D
] = DVO_PORT_DPD
,
5057 [PORT_E
] = DVO_PORT_DPE
,
5061 * eDP not supported on g4x. so bail out early just
5062 * for a bit extra safety in case the VBT is bonkers.
5064 if (INTEL_INFO(dev
)->gen
< 5)
5070 if (!dev_priv
->vbt
.child_dev_num
)
5073 for (i
= 0; i
< dev_priv
->vbt
.child_dev_num
; i
++) {
5074 p_child
= dev_priv
->vbt
.child_dev
+ i
;
5076 if (p_child
->common
.dvo_port
== port_mapping
[port
] &&
5077 (p_child
->common
.device_type
& DEVICE_TYPE_eDP_BITS
) ==
5078 (DEVICE_TYPE_eDP
& DEVICE_TYPE_eDP_BITS
))
5085 intel_dp_add_properties(struct intel_dp
*intel_dp
, struct drm_connector
*connector
)
5087 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
5089 intel_attach_force_audio_property(connector
);
5090 intel_attach_broadcast_rgb_property(connector
);
5091 intel_dp
->color_range_auto
= true;
5093 if (is_edp(intel_dp
)) {
5094 drm_mode_create_scaling_mode_property(connector
->dev
);
5095 drm_object_attach_property(
5097 connector
->dev
->mode_config
.scaling_mode_property
,
5098 DRM_MODE_SCALE_ASPECT
);
5099 intel_connector
->panel
.fitting_mode
= DRM_MODE_SCALE_ASPECT
;
5103 static void intel_dp_init_panel_power_timestamps(struct intel_dp
*intel_dp
)
5105 intel_dp
->last_power_cycle
= jiffies
;
5106 intel_dp
->last_power_on
= jiffies
;
5107 intel_dp
->last_backlight_off
= jiffies
;
5111 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
5112 struct intel_dp
*intel_dp
)
5114 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5115 struct edp_power_seq cur
, vbt
, spec
,
5116 *final
= &intel_dp
->pps_delays
;
5117 u32 pp_on
, pp_off
, pp_div
= 0, pp_ctl
= 0;
5118 i915_reg_t pp_ctrl_reg
, pp_on_reg
, pp_off_reg
, pp_div_reg
;
5120 lockdep_assert_held(&dev_priv
->pps_mutex
);
5122 /* already initialized? */
5123 if (final
->t11_t12
!= 0)
5126 if (IS_BROXTON(dev
)) {
5128 * TODO: BXT has 2 sets of PPS registers.
5129 * Correct Register for Broxton need to be identified
5130 * using VBT. hardcoding for now
5132 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5133 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5134 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5135 } else if (HAS_PCH_SPLIT(dev
)) {
5136 pp_ctrl_reg
= PCH_PP_CONTROL
;
5137 pp_on_reg
= PCH_PP_ON_DELAYS
;
5138 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5139 pp_div_reg
= PCH_PP_DIVISOR
;
5141 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5143 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
5144 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5145 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5146 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5149 /* Workaround: Need to write PP_CONTROL with the unlock key as
5150 * the very first thing. */
5151 pp_ctl
= ironlake_get_pp_control(intel_dp
);
5153 pp_on
= I915_READ(pp_on_reg
);
5154 pp_off
= I915_READ(pp_off_reg
);
5155 if (!IS_BROXTON(dev
)) {
5156 I915_WRITE(pp_ctrl_reg
, pp_ctl
);
5157 pp_div
= I915_READ(pp_div_reg
);
5160 /* Pull timing values out of registers */
5161 cur
.t1_t3
= (pp_on
& PANEL_POWER_UP_DELAY_MASK
) >>
5162 PANEL_POWER_UP_DELAY_SHIFT
;
5164 cur
.t8
= (pp_on
& PANEL_LIGHT_ON_DELAY_MASK
) >>
5165 PANEL_LIGHT_ON_DELAY_SHIFT
;
5167 cur
.t9
= (pp_off
& PANEL_LIGHT_OFF_DELAY_MASK
) >>
5168 PANEL_LIGHT_OFF_DELAY_SHIFT
;
5170 cur
.t10
= (pp_off
& PANEL_POWER_DOWN_DELAY_MASK
) >>
5171 PANEL_POWER_DOWN_DELAY_SHIFT
;
5173 if (IS_BROXTON(dev
)) {
5174 u16 tmp
= (pp_ctl
& BXT_POWER_CYCLE_DELAY_MASK
) >>
5175 BXT_POWER_CYCLE_DELAY_SHIFT
;
5177 cur
.t11_t12
= (tmp
- 1) * 1000;
5181 cur
.t11_t12
= ((pp_div
& PANEL_POWER_CYCLE_DELAY_MASK
) >>
5182 PANEL_POWER_CYCLE_DELAY_SHIFT
) * 1000;
5185 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5186 cur
.t1_t3
, cur
.t8
, cur
.t9
, cur
.t10
, cur
.t11_t12
);
5188 vbt
= dev_priv
->vbt
.edp_pps
;
5190 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5191 * our hw here, which are all in 100usec. */
5192 spec
.t1_t3
= 210 * 10;
5193 spec
.t8
= 50 * 10; /* no limit for t8, use t7 instead */
5194 spec
.t9
= 50 * 10; /* no limit for t9, make it symmetric with t8 */
5195 spec
.t10
= 500 * 10;
5196 /* This one is special and actually in units of 100ms, but zero
5197 * based in the hw (so we need to add 100 ms). But the sw vbt
5198 * table multiplies it with 1000 to make it in units of 100usec,
5200 spec
.t11_t12
= (510 + 100) * 10;
5202 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5203 vbt
.t1_t3
, vbt
.t8
, vbt
.t9
, vbt
.t10
, vbt
.t11_t12
);
5205 /* Use the max of the register settings and vbt. If both are
5206 * unset, fall back to the spec limits. */
5207 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5209 max(cur.field, vbt.field))
5210 assign_final(t1_t3
);
5214 assign_final(t11_t12
);
5217 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5218 intel_dp
->panel_power_up_delay
= get_delay(t1_t3
);
5219 intel_dp
->backlight_on_delay
= get_delay(t8
);
5220 intel_dp
->backlight_off_delay
= get_delay(t9
);
5221 intel_dp
->panel_power_down_delay
= get_delay(t10
);
5222 intel_dp
->panel_power_cycle_delay
= get_delay(t11_t12
);
5225 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5226 intel_dp
->panel_power_up_delay
, intel_dp
->panel_power_down_delay
,
5227 intel_dp
->panel_power_cycle_delay
);
5229 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5230 intel_dp
->backlight_on_delay
, intel_dp
->backlight_off_delay
);
5234 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
5235 struct intel_dp
*intel_dp
)
5237 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5238 u32 pp_on
, pp_off
, pp_div
, port_sel
= 0;
5239 int div
= HAS_PCH_SPLIT(dev
) ? intel_pch_rawclk(dev
) : intel_hrawclk(dev
);
5240 i915_reg_t pp_on_reg
, pp_off_reg
, pp_div_reg
, pp_ctrl_reg
;
5241 enum port port
= dp_to_dig_port(intel_dp
)->port
;
5242 const struct edp_power_seq
*seq
= &intel_dp
->pps_delays
;
5244 lockdep_assert_held(&dev_priv
->pps_mutex
);
5246 if (IS_BROXTON(dev
)) {
5248 * TODO: BXT has 2 sets of PPS registers.
5249 * Correct Register for Broxton need to be identified
5250 * using VBT. hardcoding for now
5252 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5253 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5254 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5256 } else if (HAS_PCH_SPLIT(dev
)) {
5257 pp_on_reg
= PCH_PP_ON_DELAYS
;
5258 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5259 pp_div_reg
= PCH_PP_DIVISOR
;
5261 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5263 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5264 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5265 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5269 * And finally store the new values in the power sequencer. The
5270 * backlight delays are set to 1 because we do manual waits on them. For
5271 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5272 * we'll end up waiting for the backlight off delay twice: once when we
5273 * do the manual sleep, and once when we disable the panel and wait for
5274 * the PP_STATUS bit to become zero.
5276 pp_on
= (seq
->t1_t3
<< PANEL_POWER_UP_DELAY_SHIFT
) |
5277 (1 << PANEL_LIGHT_ON_DELAY_SHIFT
);
5278 pp_off
= (1 << PANEL_LIGHT_OFF_DELAY_SHIFT
) |
5279 (seq
->t10
<< PANEL_POWER_DOWN_DELAY_SHIFT
);
5280 /* Compute the divisor for the pp clock, simply match the Bspec
5282 if (IS_BROXTON(dev
)) {
5283 pp_div
= I915_READ(pp_ctrl_reg
);
5284 pp_div
&= ~BXT_POWER_CYCLE_DELAY_MASK
;
5285 pp_div
|= (DIV_ROUND_UP((seq
->t11_t12
+ 1), 1000)
5286 << BXT_POWER_CYCLE_DELAY_SHIFT
);
5288 pp_div
= ((100 * div
)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT
;
5289 pp_div
|= (DIV_ROUND_UP(seq
->t11_t12
, 1000)
5290 << PANEL_POWER_CYCLE_DELAY_SHIFT
);
5293 /* Haswell doesn't have any port selection bits for the panel
5294 * power sequencer any more. */
5295 if (IS_VALLEYVIEW(dev
)) {
5296 port_sel
= PANEL_PORT_SELECT_VLV(port
);
5297 } else if (HAS_PCH_IBX(dev
) || HAS_PCH_CPT(dev
)) {
5299 port_sel
= PANEL_PORT_SELECT_DPA
;
5301 port_sel
= PANEL_PORT_SELECT_DPD
;
5306 I915_WRITE(pp_on_reg
, pp_on
);
5307 I915_WRITE(pp_off_reg
, pp_off
);
5308 if (IS_BROXTON(dev
))
5309 I915_WRITE(pp_ctrl_reg
, pp_div
);
5311 I915_WRITE(pp_div_reg
, pp_div
);
5313 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5314 I915_READ(pp_on_reg
),
5315 I915_READ(pp_off_reg
),
5317 (I915_READ(pp_ctrl_reg
) & BXT_POWER_CYCLE_DELAY_MASK
) :
5318 I915_READ(pp_div_reg
));
5322 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5324 * @refresh_rate: RR to be programmed
5326 * This function gets called when refresh rate (RR) has to be changed from
5327 * one frequency to another. Switches can be between high and low RR
5328 * supported by the panel or to any other RR based on media playback (in
5329 * this case, RR value needs to be passed from user space).
5331 * The caller of this function needs to take a lock on dev_priv->drrs.
5333 static void intel_dp_set_drrs_state(struct drm_device
*dev
, int refresh_rate
)
5335 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5336 struct intel_encoder
*encoder
;
5337 struct intel_digital_port
*dig_port
= NULL
;
5338 struct intel_dp
*intel_dp
= dev_priv
->drrs
.dp
;
5339 struct intel_crtc_state
*config
= NULL
;
5340 struct intel_crtc
*intel_crtc
= NULL
;
5341 enum drrs_refresh_rate_type index
= DRRS_HIGH_RR
;
5343 if (refresh_rate
<= 0) {
5344 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5348 if (intel_dp
== NULL
) {
5349 DRM_DEBUG_KMS("DRRS not supported.\n");
5354 * FIXME: This needs proper synchronization with psr state for some
5355 * platforms that cannot have PSR and DRRS enabled at the same time.
5358 dig_port
= dp_to_dig_port(intel_dp
);
5359 encoder
= &dig_port
->base
;
5360 intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
5363 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5367 config
= intel_crtc
->config
;
5369 if (dev_priv
->drrs
.type
< SEAMLESS_DRRS_SUPPORT
) {
5370 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5374 if (intel_dp
->attached_connector
->panel
.downclock_mode
->vrefresh
==
5376 index
= DRRS_LOW_RR
;
5378 if (index
== dev_priv
->drrs
.refresh_rate_type
) {
5380 "DRRS requested for previously set RR...ignoring\n");
5384 if (!intel_crtc
->active
) {
5385 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5389 if (INTEL_INFO(dev
)->gen
>= 8 && !IS_CHERRYVIEW(dev
)) {
5392 intel_dp_set_m_n(intel_crtc
, M1_N1
);
5395 intel_dp_set_m_n(intel_crtc
, M2_N2
);
5399 DRM_ERROR("Unsupported refreshrate type\n");
5401 } else if (INTEL_INFO(dev
)->gen
> 6) {
5402 i915_reg_t reg
= PIPECONF(intel_crtc
->config
->cpu_transcoder
);
5405 val
= I915_READ(reg
);
5406 if (index
> DRRS_HIGH_RR
) {
5407 if (IS_VALLEYVIEW(dev
))
5408 val
|= PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5410 val
|= PIPECONF_EDP_RR_MODE_SWITCH
;
5412 if (IS_VALLEYVIEW(dev
))
5413 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5415 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH
;
5417 I915_WRITE(reg
, val
);
5420 dev_priv
->drrs
.refresh_rate_type
= index
;
5422 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate
);
5426 * intel_edp_drrs_enable - init drrs struct if supported
5427 * @intel_dp: DP struct
5429 * Initializes frontbuffer_bits and drrs.dp
5431 void intel_edp_drrs_enable(struct intel_dp
*intel_dp
)
5433 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5434 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5435 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5436 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5437 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5439 if (!intel_crtc
->config
->has_drrs
) {
5440 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5444 mutex_lock(&dev_priv
->drrs
.mutex
);
5445 if (WARN_ON(dev_priv
->drrs
.dp
)) {
5446 DRM_ERROR("DRRS already enabled\n");
5450 dev_priv
->drrs
.busy_frontbuffer_bits
= 0;
5452 dev_priv
->drrs
.dp
= intel_dp
;
5455 mutex_unlock(&dev_priv
->drrs
.mutex
);
5459 * intel_edp_drrs_disable - Disable DRRS
5460 * @intel_dp: DP struct
5463 void intel_edp_drrs_disable(struct intel_dp
*intel_dp
)
5465 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5466 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5467 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5468 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5469 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5471 if (!intel_crtc
->config
->has_drrs
)
5474 mutex_lock(&dev_priv
->drrs
.mutex
);
5475 if (!dev_priv
->drrs
.dp
) {
5476 mutex_unlock(&dev_priv
->drrs
.mutex
);
5480 if (dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5481 intel_dp_set_drrs_state(dev_priv
->dev
,
5482 intel_dp
->attached_connector
->panel
.
5483 fixed_mode
->vrefresh
);
5485 dev_priv
->drrs
.dp
= NULL
;
5486 mutex_unlock(&dev_priv
->drrs
.mutex
);
5488 cancel_delayed_work_sync(&dev_priv
->drrs
.work
);
5491 static void intel_edp_drrs_downclock_work(struct work_struct
*work
)
5493 struct drm_i915_private
*dev_priv
=
5494 container_of(work
, typeof(*dev_priv
), drrs
.work
.work
);
5495 struct intel_dp
*intel_dp
;
5497 mutex_lock(&dev_priv
->drrs
.mutex
);
5499 intel_dp
= dev_priv
->drrs
.dp
;
5505 * The delayed work can race with an invalidate hence we need to
5509 if (dev_priv
->drrs
.busy_frontbuffer_bits
)
5512 if (dev_priv
->drrs
.refresh_rate_type
!= DRRS_LOW_RR
)
5513 intel_dp_set_drrs_state(dev_priv
->dev
,
5514 intel_dp
->attached_connector
->panel
.
5515 downclock_mode
->vrefresh
);
5518 mutex_unlock(&dev_priv
->drrs
.mutex
);
5522 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5524 * @frontbuffer_bits: frontbuffer plane tracking bits
5526 * This function gets called everytime rendering on the given planes start.
5527 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5529 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5531 void intel_edp_drrs_invalidate(struct drm_device
*dev
,
5532 unsigned frontbuffer_bits
)
5534 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5535 struct drm_crtc
*crtc
;
5538 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5541 cancel_delayed_work(&dev_priv
->drrs
.work
);
5543 mutex_lock(&dev_priv
->drrs
.mutex
);
5544 if (!dev_priv
->drrs
.dp
) {
5545 mutex_unlock(&dev_priv
->drrs
.mutex
);
5549 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5550 pipe
= to_intel_crtc(crtc
)->pipe
;
5552 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5553 dev_priv
->drrs
.busy_frontbuffer_bits
|= frontbuffer_bits
;
5555 /* invalidate means busy screen hence upclock */
5556 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5557 intel_dp_set_drrs_state(dev_priv
->dev
,
5558 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5559 fixed_mode
->vrefresh
);
5561 mutex_unlock(&dev_priv
->drrs
.mutex
);
5565 * intel_edp_drrs_flush - Restart Idleness DRRS
5567 * @frontbuffer_bits: frontbuffer plane tracking bits
5569 * This function gets called every time rendering on the given planes has
5570 * completed or flip on a crtc is completed. So DRRS should be upclocked
5571 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5572 * if no other planes are dirty.
5574 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5576 void intel_edp_drrs_flush(struct drm_device
*dev
,
5577 unsigned frontbuffer_bits
)
5579 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5580 struct drm_crtc
*crtc
;
5583 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5586 cancel_delayed_work(&dev_priv
->drrs
.work
);
5588 mutex_lock(&dev_priv
->drrs
.mutex
);
5589 if (!dev_priv
->drrs
.dp
) {
5590 mutex_unlock(&dev_priv
->drrs
.mutex
);
5594 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5595 pipe
= to_intel_crtc(crtc
)->pipe
;
5597 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5598 dev_priv
->drrs
.busy_frontbuffer_bits
&= ~frontbuffer_bits
;
5600 /* flush means busy screen hence upclock */
5601 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5602 intel_dp_set_drrs_state(dev_priv
->dev
,
5603 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5604 fixed_mode
->vrefresh
);
5607 * flush also means no more activity hence schedule downclock, if all
5608 * other fbs are quiescent too
5610 if (!dev_priv
->drrs
.busy_frontbuffer_bits
)
5611 schedule_delayed_work(&dev_priv
->drrs
.work
,
5612 msecs_to_jiffies(1000));
5613 mutex_unlock(&dev_priv
->drrs
.mutex
);
5617 * DOC: Display Refresh Rate Switching (DRRS)
5619 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5620 * which enables switching between low and high refresh rates,
5621 * dynamically, based on the usage scenario. This feature is applicable
5622 * for internal panels.
5624 * Indication that the panel supports DRRS is given by the panel EDID, which
5625 * would list multiple refresh rates for one resolution.
5627 * DRRS is of 2 types - static and seamless.
5628 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5629 * (may appear as a blink on screen) and is used in dock-undock scenario.
5630 * Seamless DRRS involves changing RR without any visual effect to the user
5631 * and can be used during normal system usage. This is done by programming
5632 * certain registers.
5634 * Support for static/seamless DRRS may be indicated in the VBT based on
5635 * inputs from the panel spec.
5637 * DRRS saves power by switching to low RR based on usage scenarios.
5640 * The implementation is based on frontbuffer tracking implementation.
5641 * When there is a disturbance on the screen triggered by user activity or a
5642 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5643 * When there is no movement on screen, after a timeout of 1 second, a switch
5644 * to low RR is made.
5645 * For integration with frontbuffer tracking code,
5646 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5648 * DRRS can be further extended to support other internal panels and also
5649 * the scenario of video playback wherein RR is set based on the rate
5650 * requested by userspace.
5654 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5655 * @intel_connector: eDP connector
5656 * @fixed_mode: preferred mode of panel
5658 * This function is called only once at driver load to initialize basic
5662 * Downclock mode if panel supports it, else return NULL.
5663 * DRRS support is determined by the presence of downclock mode (apart
5664 * from VBT setting).
5666 static struct drm_display_mode
*
5667 intel_dp_drrs_init(struct intel_connector
*intel_connector
,
5668 struct drm_display_mode
*fixed_mode
)
5670 struct drm_connector
*connector
= &intel_connector
->base
;
5671 struct drm_device
*dev
= connector
->dev
;
5672 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5673 struct drm_display_mode
*downclock_mode
= NULL
;
5675 INIT_DELAYED_WORK(&dev_priv
->drrs
.work
, intel_edp_drrs_downclock_work
);
5676 mutex_init(&dev_priv
->drrs
.mutex
);
5678 if (INTEL_INFO(dev
)->gen
<= 6) {
5679 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5683 if (dev_priv
->vbt
.drrs_type
!= SEAMLESS_DRRS_SUPPORT
) {
5684 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5688 downclock_mode
= intel_find_panel_downclock
5689 (dev
, fixed_mode
, connector
);
5691 if (!downclock_mode
) {
5692 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5696 dev_priv
->drrs
.type
= dev_priv
->vbt
.drrs_type
;
5698 dev_priv
->drrs
.refresh_rate_type
= DRRS_HIGH_RR
;
5699 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5700 return downclock_mode
;
5703 static bool intel_edp_init_connector(struct intel_dp
*intel_dp
,
5704 struct intel_connector
*intel_connector
)
5706 struct drm_connector
*connector
= &intel_connector
->base
;
5707 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
5708 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5709 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5710 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5711 struct drm_display_mode
*fixed_mode
= NULL
;
5712 struct drm_display_mode
*downclock_mode
= NULL
;
5714 struct drm_display_mode
*scan
;
5716 enum pipe pipe
= INVALID_PIPE
;
5718 if (!is_edp(intel_dp
))
5722 intel_edp_panel_vdd_sanitize(intel_dp
);
5723 pps_unlock(intel_dp
);
5725 /* Cache DPCD and EDID for edp. */
5726 has_dpcd
= intel_dp_get_dpcd(intel_dp
);
5729 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11)
5730 dev_priv
->no_aux_handshake
=
5731 intel_dp
->dpcd
[DP_MAX_DOWNSPREAD
] &
5732 DP_NO_AUX_HANDSHAKE_LINK_TRAINING
;
5734 /* if this fails, presume the device is a ghost */
5735 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5739 /* We now know it's not a ghost, init power sequence regs. */
5741 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
5742 pps_unlock(intel_dp
);
5744 mutex_lock(&dev
->mode_config
.mutex
);
5745 edid
= drm_get_edid(connector
, &intel_dp
->aux
.ddc
);
5747 if (drm_add_edid_modes(connector
, edid
)) {
5748 drm_mode_connector_update_edid_property(connector
,
5750 drm_edid_to_eld(connector
, edid
);
5753 edid
= ERR_PTR(-EINVAL
);
5756 edid
= ERR_PTR(-ENOENT
);
5758 intel_connector
->edid
= edid
;
5760 /* prefer fixed mode from EDID if available */
5761 list_for_each_entry(scan
, &connector
->probed_modes
, head
) {
5762 if ((scan
->type
& DRM_MODE_TYPE_PREFERRED
)) {
5763 fixed_mode
= drm_mode_duplicate(dev
, scan
);
5764 downclock_mode
= intel_dp_drrs_init(
5765 intel_connector
, fixed_mode
);
5770 /* fallback to VBT if available for eDP */
5771 if (!fixed_mode
&& dev_priv
->vbt
.lfp_lvds_vbt_mode
) {
5772 fixed_mode
= drm_mode_duplicate(dev
,
5773 dev_priv
->vbt
.lfp_lvds_vbt_mode
);
5775 fixed_mode
->type
|= DRM_MODE_TYPE_PREFERRED
;
5777 mutex_unlock(&dev
->mode_config
.mutex
);
5779 if (IS_VALLEYVIEW(dev
)) {
5780 intel_dp
->edp_notifier
.notifier_call
= edp_notify_handler
;
5781 register_reboot_notifier(&intel_dp
->edp_notifier
);
5784 * Figure out the current pipe for the initial backlight setup.
5785 * If the current pipe isn't valid, try the PPS pipe, and if that
5786 * fails just assume pipe A.
5788 if (IS_CHERRYVIEW(dev
))
5789 pipe
= DP_PORT_TO_PIPE_CHV(intel_dp
->DP
);
5791 pipe
= PORT_TO_PIPE(intel_dp
->DP
);
5793 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5794 pipe
= intel_dp
->pps_pipe
;
5796 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5799 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5803 intel_panel_init(&intel_connector
->panel
, fixed_mode
, downclock_mode
);
5804 intel_connector
->panel
.backlight
.power
= intel_edp_backlight_power
;
5805 intel_panel_setup_backlight(connector
, pipe
);
5811 intel_dp_init_connector(struct intel_digital_port
*intel_dig_port
,
5812 struct intel_connector
*intel_connector
)
5814 struct drm_connector
*connector
= &intel_connector
->base
;
5815 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5816 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5817 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5818 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5819 enum port port
= intel_dig_port
->port
;
5822 intel_dp
->pps_pipe
= INVALID_PIPE
;
5824 /* intel_dp vfuncs */
5825 if (INTEL_INFO(dev
)->gen
>= 9)
5826 intel_dp
->get_aux_clock_divider
= skl_get_aux_clock_divider
;
5827 else if (IS_VALLEYVIEW(dev
))
5828 intel_dp
->get_aux_clock_divider
= vlv_get_aux_clock_divider
;
5829 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
5830 intel_dp
->get_aux_clock_divider
= hsw_get_aux_clock_divider
;
5831 else if (HAS_PCH_SPLIT(dev
))
5832 intel_dp
->get_aux_clock_divider
= ilk_get_aux_clock_divider
;
5834 intel_dp
->get_aux_clock_divider
= i9xx_get_aux_clock_divider
;
5836 if (INTEL_INFO(dev
)->gen
>= 9)
5837 intel_dp
->get_aux_send_ctl
= skl_get_aux_send_ctl
;
5839 intel_dp
->get_aux_send_ctl
= i9xx_get_aux_send_ctl
;
5842 intel_dp
->prepare_link_retrain
= intel_ddi_prepare_link_retrain
;
5844 /* Preserve the current hw state. */
5845 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
);
5846 intel_dp
->attached_connector
= intel_connector
;
5848 if (intel_dp_is_edp(dev
, port
))
5849 type
= DRM_MODE_CONNECTOR_eDP
;
5851 type
= DRM_MODE_CONNECTOR_DisplayPort
;
5854 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5855 * for DP the encoder type can be set by the caller to
5856 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5858 if (type
== DRM_MODE_CONNECTOR_eDP
)
5859 intel_encoder
->type
= INTEL_OUTPUT_EDP
;
5861 /* eDP only on port B and/or C on vlv/chv */
5862 if (WARN_ON(IS_VALLEYVIEW(dev
) && is_edp(intel_dp
) &&
5863 port
!= PORT_B
&& port
!= PORT_C
))
5866 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5867 type
== DRM_MODE_CONNECTOR_eDP
? "eDP" : "DP",
5870 drm_connector_init(dev
, connector
, &intel_dp_connector_funcs
, type
);
5871 drm_connector_helper_add(connector
, &intel_dp_connector_helper_funcs
);
5873 connector
->interlace_allowed
= true;
5874 connector
->doublescan_allowed
= 0;
5876 INIT_DELAYED_WORK(&intel_dp
->panel_vdd_work
,
5877 edp_panel_vdd_work
);
5879 intel_connector_attach_encoder(intel_connector
, intel_encoder
);
5880 drm_connector_register(connector
);
5883 intel_connector
->get_hw_state
= intel_ddi_connector_get_hw_state
;
5885 intel_connector
->get_hw_state
= intel_connector_get_hw_state
;
5886 intel_connector
->unregister
= intel_dp_connector_unregister
;
5888 /* Set up the hotplug pin. */
5891 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5894 intel_encoder
->hpd_pin
= HPD_PORT_B
;
5895 if (IS_BXT_REVID(dev
, 0, BXT_REVID_A1
))
5896 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5899 intel_encoder
->hpd_pin
= HPD_PORT_C
;
5902 intel_encoder
->hpd_pin
= HPD_PORT_D
;
5905 intel_encoder
->hpd_pin
= HPD_PORT_E
;
5911 if (is_edp(intel_dp
)) {
5913 intel_dp_init_panel_power_timestamps(intel_dp
);
5914 if (IS_VALLEYVIEW(dev
))
5915 vlv_initial_power_sequencer_setup(intel_dp
);
5917 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
5918 pps_unlock(intel_dp
);
5921 ret
= intel_dp_aux_init(intel_dp
, intel_connector
);
5925 /* init MST on ports that can support it */
5926 if (HAS_DP_MST(dev
) &&
5927 (port
== PORT_B
|| port
== PORT_C
|| port
== PORT_D
))
5928 intel_dp_mst_encoder_init(intel_dig_port
,
5929 intel_connector
->base
.base
.id
);
5931 if (!intel_edp_init_connector(intel_dp
, intel_connector
)) {
5932 intel_dp_aux_fini(intel_dp
);
5933 intel_dp_mst_encoder_cleanup(intel_dig_port
);
5937 intel_dp_add_properties(intel_dp
, connector
);
5939 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5940 * 0xd. Failure to do so will result in spurious interrupts being
5941 * generated on the port when a cable is not attached.
5943 if (IS_G4X(dev
) && !IS_GM45(dev
)) {
5944 u32 temp
= I915_READ(PEG_BAND_GAP_DATA
);
5945 I915_WRITE(PEG_BAND_GAP_DATA
, (temp
& ~0xf) | 0xd);
5948 i915_debugfs_connector_add(connector
);
5953 if (is_edp(intel_dp
)) {
5954 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
5956 * vdd might still be enabled do to the delayed vdd off.
5957 * Make sure vdd is actually turned off here.
5960 edp_panel_vdd_off_sync(intel_dp
);
5961 pps_unlock(intel_dp
);
5963 drm_connector_unregister(connector
);
5964 drm_connector_cleanup(connector
);
5970 intel_dp_init(struct drm_device
*dev
,
5971 i915_reg_t output_reg
, enum port port
)
5973 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5974 struct intel_digital_port
*intel_dig_port
;
5975 struct intel_encoder
*intel_encoder
;
5976 struct drm_encoder
*encoder
;
5977 struct intel_connector
*intel_connector
;
5979 intel_dig_port
= kzalloc(sizeof(*intel_dig_port
), GFP_KERNEL
);
5980 if (!intel_dig_port
)
5983 intel_connector
= intel_connector_alloc();
5984 if (!intel_connector
)
5985 goto err_connector_alloc
;
5987 intel_encoder
= &intel_dig_port
->base
;
5988 encoder
= &intel_encoder
->base
;
5990 drm_encoder_init(dev
, &intel_encoder
->base
, &intel_dp_enc_funcs
,
5991 DRM_MODE_ENCODER_TMDS
);
5993 intel_encoder
->compute_config
= intel_dp_compute_config
;
5994 intel_encoder
->disable
= intel_disable_dp
;
5995 intel_encoder
->get_hw_state
= intel_dp_get_hw_state
;
5996 intel_encoder
->get_config
= intel_dp_get_config
;
5997 intel_encoder
->suspend
= intel_dp_encoder_suspend
;
5998 if (IS_CHERRYVIEW(dev
)) {
5999 intel_encoder
->pre_pll_enable
= chv_dp_pre_pll_enable
;
6000 intel_encoder
->pre_enable
= chv_pre_enable_dp
;
6001 intel_encoder
->enable
= vlv_enable_dp
;
6002 intel_encoder
->post_disable
= chv_post_disable_dp
;
6003 intel_encoder
->post_pll_disable
= chv_dp_post_pll_disable
;
6004 } else if (IS_VALLEYVIEW(dev
)) {
6005 intel_encoder
->pre_pll_enable
= vlv_dp_pre_pll_enable
;
6006 intel_encoder
->pre_enable
= vlv_pre_enable_dp
;
6007 intel_encoder
->enable
= vlv_enable_dp
;
6008 intel_encoder
->post_disable
= vlv_post_disable_dp
;
6010 intel_encoder
->pre_enable
= g4x_pre_enable_dp
;
6011 intel_encoder
->enable
= g4x_enable_dp
;
6012 if (INTEL_INFO(dev
)->gen
>= 5)
6013 intel_encoder
->post_disable
= ilk_post_disable_dp
;
6016 intel_dig_port
->port
= port
;
6017 intel_dig_port
->dp
.output_reg
= output_reg
;
6019 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
6020 if (IS_CHERRYVIEW(dev
)) {
6022 intel_encoder
->crtc_mask
= 1 << 2;
6024 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1);
6026 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1) | (1 << 2);
6028 intel_encoder
->cloneable
= 0;
6030 intel_dig_port
->hpd_pulse
= intel_dp_hpd_pulse
;
6031 dev_priv
->hotplug
.irq_port
[port
] = intel_dig_port
;
6033 if (!intel_dp_init_connector(intel_dig_port
, intel_connector
))
6034 goto err_init_connector
;
6039 drm_encoder_cleanup(encoder
);
6040 kfree(intel_connector
);
6041 err_connector_alloc
:
6042 kfree(intel_dig_port
);
6047 void intel_dp_mst_suspend(struct drm_device
*dev
)
6049 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6053 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6054 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6055 if (!intel_dig_port
)
6058 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6059 if (!intel_dig_port
->dp
.can_mst
)
6061 if (intel_dig_port
->dp
.is_mst
)
6062 drm_dp_mst_topology_mgr_suspend(&intel_dig_port
->dp
.mst_mgr
);
6067 void intel_dp_mst_resume(struct drm_device
*dev
)
6069 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6072 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6073 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6074 if (!intel_dig_port
)
6076 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6079 if (!intel_dig_port
->dp
.can_mst
)
6082 ret
= drm_dp_mst_topology_mgr_resume(&intel_dig_port
->dp
.mst_mgr
);
6084 intel_dp_check_mst_status(&intel_dig_port
->dp
);