2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll
[] = {
57 { .p1
= 2, .p2
= 10, .n
= 2, .m1
= 23, .m2
= 8 } },
59 { .p1
= 1, .p2
= 10, .n
= 1, .m1
= 14, .m2
= 2 } }
62 static const struct dp_link_dpll pch_dpll
[] = {
64 { .p1
= 2, .p2
= 10, .n
= 1, .m1
= 12, .m2
= 9 } },
66 { .p1
= 1, .p2
= 10, .n
= 2, .m1
= 14, .m2
= 8 } }
69 static const struct dp_link_dpll vlv_dpll
[] = {
71 { .p1
= 3, .p2
= 2, .n
= 5, .m1
= 3, .m2
= 81 } },
73 { .p1
= 2, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll
[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1
= 4, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1
= 4, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1
= 2, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } }
94 static const int bxt_rates
[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96 static const int skl_rates
[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98 static const int chv_rates
[] = { 162000, 202500, 210000, 216000,
99 243000, 270000, 324000, 405000,
100 420000, 432000, 540000 };
101 static const int default_rates
[] = { 162000, 270000, 540000 };
104 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105 * @intel_dp: DP struct
107 * If a CPU or PCH DP output is attached to an eDP panel, this function
108 * will return true, and false otherwise.
/* Returns true if the encoder behind this DP port is INTEL_OUTPUT_EDP,
 * i.e. the port drives an eDP panel (see kerneldoc comment above). */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
/* Map an intel_dp back to the drm_device that owns it, via the
 * encoder embedded in its digital port. */
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}
/* Return the intel_dp hanging off the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
129 static void intel_dp_link_down(struct intel_dp
*intel_dp
);
130 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
);
131 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
);
132 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
);
133 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
137 intel_dp_max_link_bw(struct intel_dp
*intel_dp
)
139 int max_link_bw
= intel_dp
->dpcd
[DP_MAX_LINK_RATE
];
141 switch (max_link_bw
) {
142 case DP_LINK_BW_1_62
:
147 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
149 max_link_bw
= DP_LINK_BW_1_62
;
155 static u8
intel_dp_max_lane_count(struct intel_dp
*intel_dp
)
157 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
158 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
159 u8 source_max
, sink_max
;
162 if (HAS_DDI(dev
) && intel_dig_port
->port
== PORT_A
&&
163 (intel_dig_port
->saved_port_bits
& DDI_A_4_LANES
) == 0)
166 sink_max
= drm_dp_max_lane_count(intel_dp
->dpcd
);
168 return min(source_max
, sink_max
);
172 * The units on the numbers in the next two are... bizarre. Examples will
173 * make it clearer; this one parallels an example in the eDP spec.
175 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
177 * 270000 * 1 * 8 / 10 == 216000
179 * The actual data capacity of that configuration is 2.16Gbit/s, so the
180 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
181 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182 * 119000. At 18bpp that's 2142000 kilobits per second.
184 * Thus the strange-looking division by 10 in intel_dp_link_required, to
185 * get the result in decakilobits instead of kilobits.
/*
 * Link bandwidth required by a mode, in decakilobits/s (see the units
 * discussion in the comment block above): kilobit rate divided by 10,
 * rounded up — hence the +9 before the integer division.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
/*
 * Maximum payload data rate of a link configuration, in decakilobits/s.
 * 8b/10b channel coding means only 8 of every 10 link bits carry data,
 * hence the * 8 / 10.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
200 static enum drm_mode_status
201 intel_dp_mode_valid(struct drm_connector
*connector
,
202 struct drm_display_mode
*mode
)
204 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
205 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
206 struct drm_display_mode
*fixed_mode
= intel_connector
->panel
.fixed_mode
;
207 int target_clock
= mode
->clock
;
208 int max_rate
, mode_rate
, max_lanes
, max_link_clock
;
210 if (is_edp(intel_dp
) && fixed_mode
) {
211 if (mode
->hdisplay
> fixed_mode
->hdisplay
)
214 if (mode
->vdisplay
> fixed_mode
->vdisplay
)
217 target_clock
= fixed_mode
->clock
;
220 max_link_clock
= intel_dp_max_link_rate(intel_dp
);
221 max_lanes
= intel_dp_max_lane_count(intel_dp
);
223 max_rate
= intel_dp_max_data_rate(max_link_clock
, max_lanes
);
224 mode_rate
= intel_dp_link_required(target_clock
, 18);
226 if (mode_rate
> max_rate
)
227 return MODE_CLOCK_HIGH
;
229 if (mode
->clock
< 10000)
230 return MODE_CLOCK_LOW
;
232 if (mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
233 return MODE_H_ILLEGAL
;
/*
 * Pack up to 4 bytes of an AUX message into one 32-bit data-register
 * value, big-endian: src[0] lands in the most significant byte.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	/*
	 * A data register holds at most 4 bytes; clamp so the shift below
	 * never goes negative (a negative shift count is undefined in C).
	 */
	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);

	return v;
}
/*
 * Unpack one 32-bit AUX data-register value into up to 4 bytes,
 * big-endian: the most significant byte of src becomes dst[0].
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	/*
	 * Only 4 bytes fit in a data register; clamp so the shift below
	 * never goes negative (a negative shift count is undefined in C).
	 */
	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
259 /* hrawclock is 1/4 the FSB frequency */
261 intel_hrawclk(struct drm_device
*dev
)
263 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
266 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
267 if (IS_VALLEYVIEW(dev
))
270 clkcfg
= I915_READ(CLKCFG
);
271 switch (clkcfg
& CLKCFG_FSB_MASK
) {
280 case CLKCFG_FSB_1067
:
282 case CLKCFG_FSB_1333
:
284 /* these two are just a guess; one of them might be right */
285 case CLKCFG_FSB_1600
:
286 case CLKCFG_FSB_1600_ALT
:
294 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
295 struct intel_dp
*intel_dp
);
297 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
298 struct intel_dp
*intel_dp
);
/*
 * Acquire pps_mutex for panel power sequencer register access.
 *
 * A display power domain reference for the port is taken first, while
 * NOT holding pps_mutex — the comment in vlv_power_sequencer_reset()
 * explains why the get/put must be ordered this way (deadlock against
 * the power_domain mutex otherwise). Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
/*
 * Counterpart of pps_lock(): drop pps_mutex first, then release the
 * display power domain reference — the put must happen while NOT
 * holding pps_mutex (see vlv_power_sequencer_reset()).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
333 vlv_power_sequencer_kick(struct intel_dp
*intel_dp
)
335 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
336 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
337 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
338 enum pipe pipe
= intel_dp
->pps_pipe
;
342 if (WARN(I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
,
343 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
344 pipe_name(pipe
), port_name(intel_dig_port
->port
)))
347 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
348 pipe_name(pipe
), port_name(intel_dig_port
->port
));
350 /* Preserve the BIOS-computed detected bit. This is
351 * supposed to be read-only.
353 DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
354 DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
355 DP
|= DP_PORT_WIDTH(1);
356 DP
|= DP_LINK_TRAIN_PAT_1
;
358 if (IS_CHERRYVIEW(dev
))
359 DP
|= DP_PIPE_SELECT_CHV(pipe
);
360 else if (pipe
== PIPE_B
)
361 DP
|= DP_PIPEB_SELECT
;
363 pll_enabled
= I915_READ(DPLL(pipe
)) & DPLL_VCO_ENABLE
;
366 * The DPLL for the pipe must be enabled for this to work.
367 * So enable temporarily it if it's not already enabled.
370 vlv_force_pll_on(dev
, pipe
, IS_CHERRYVIEW(dev
) ?
371 &chv_dpll
[0].dpll
: &vlv_dpll
[0].dpll
);
374 * Similar magic as in intel_dp_enable_port().
375 * We _must_ do this port enable + disable trick
376 * to make this power seqeuencer lock onto the port.
377 * Otherwise even VDD force bit won't work.
379 I915_WRITE(intel_dp
->output_reg
, DP
);
380 POSTING_READ(intel_dp
->output_reg
);
382 I915_WRITE(intel_dp
->output_reg
, DP
| DP_PORT_EN
);
383 POSTING_READ(intel_dp
->output_reg
);
385 I915_WRITE(intel_dp
->output_reg
, DP
& ~DP_PORT_EN
);
386 POSTING_READ(intel_dp
->output_reg
);
389 vlv_force_pll_off(dev
, pipe
);
393 vlv_power_sequencer_pipe(struct intel_dp
*intel_dp
)
395 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
396 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
397 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
398 struct intel_encoder
*encoder
;
399 unsigned int pipes
= (1 << PIPE_A
) | (1 << PIPE_B
);
402 lockdep_assert_held(&dev_priv
->pps_mutex
);
404 /* We should never land here with regular DP ports */
405 WARN_ON(!is_edp(intel_dp
));
407 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
408 return intel_dp
->pps_pipe
;
411 * We don't have power sequencer currently.
412 * Pick one that's not used by other ports.
414 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
416 struct intel_dp
*tmp
;
418 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
421 tmp
= enc_to_intel_dp(&encoder
->base
);
423 if (tmp
->pps_pipe
!= INVALID_PIPE
)
424 pipes
&= ~(1 << tmp
->pps_pipe
);
428 * Didn't find one. This should not happen since there
429 * are two power sequencers and up to two eDP ports.
431 if (WARN_ON(pipes
== 0))
434 pipe
= ffs(pipes
) - 1;
436 vlv_steal_power_sequencer(dev
, pipe
);
437 intel_dp
->pps_pipe
= pipe
;
439 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
440 pipe_name(intel_dp
->pps_pipe
),
441 port_name(intel_dig_port
->port
));
443 /* init power sequencer on this pipe and port */
444 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
445 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
448 * Even vdd force doesn't work until we've made
449 * the power sequencer lock in on the port.
451 vlv_power_sequencer_kick(intel_dp
);
453 return intel_dp
->pps_pipe
;
456 typedef bool (*vlv_pipe_check
)(struct drm_i915_private
*dev_priv
,
459 static bool vlv_pipe_has_pp_on(struct drm_i915_private
*dev_priv
,
462 return I915_READ(VLV_PIPE_PP_STATUS(pipe
)) & PP_ON
;
465 static bool vlv_pipe_has_vdd_on(struct drm_i915_private
*dev_priv
,
468 return I915_READ(VLV_PIPE_PP_CONTROL(pipe
)) & EDP_FORCE_VDD
;
471 static bool vlv_pipe_any(struct drm_i915_private
*dev_priv
,
478 vlv_initial_pps_pipe(struct drm_i915_private
*dev_priv
,
480 vlv_pipe_check pipe_check
)
484 for (pipe
= PIPE_A
; pipe
<= PIPE_B
; pipe
++) {
485 u32 port_sel
= I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe
)) &
486 PANEL_PORT_SELECT_MASK
;
488 if (port_sel
!= PANEL_PORT_SELECT_VLV(port
))
491 if (!pipe_check(dev_priv
, pipe
))
501 vlv_initial_power_sequencer_setup(struct intel_dp
*intel_dp
)
503 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
504 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
505 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
506 enum port port
= intel_dig_port
->port
;
508 lockdep_assert_held(&dev_priv
->pps_mutex
);
510 /* try to find a pipe with this port selected */
511 /* first pick one where the panel is on */
512 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
514 /* didn't find one? pick one where vdd is on */
515 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
516 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
517 vlv_pipe_has_vdd_on
);
518 /* didn't find one? pick one with just the correct port */
519 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
520 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
523 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
524 if (intel_dp
->pps_pipe
== INVALID_PIPE
) {
525 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
530 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
531 port_name(port
), pipe_name(intel_dp
->pps_pipe
));
533 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
534 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
537 void vlv_power_sequencer_reset(struct drm_i915_private
*dev_priv
)
539 struct drm_device
*dev
= dev_priv
->dev
;
540 struct intel_encoder
*encoder
;
542 if (WARN_ON(!IS_VALLEYVIEW(dev
)))
546 * We can't grab pps_mutex here due to deadlock with power_domain
547 * mutex when power_domain functions are called while holding pps_mutex.
548 * That also means that in order to use pps_pipe the code needs to
549 * hold both a power domain reference and pps_mutex, and the power domain
550 * reference get/put must be done while _not_ holding pps_mutex.
551 * pps_{lock,unlock}() do these steps in the correct order, so one
552 * should use them always.
555 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, base
.head
) {
556 struct intel_dp
*intel_dp
;
558 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
561 intel_dp
= enc_to_intel_dp(&encoder
->base
);
562 intel_dp
->pps_pipe
= INVALID_PIPE
;
566 static u32
_pp_ctrl_reg(struct intel_dp
*intel_dp
)
568 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
571 return BXT_PP_CONTROL(0);
572 else if (HAS_PCH_SPLIT(dev
))
573 return PCH_PP_CONTROL
;
575 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp
));
578 static u32
_pp_stat_reg(struct intel_dp
*intel_dp
)
580 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
583 return BXT_PP_STATUS(0);
584 else if (HAS_PCH_SPLIT(dev
))
585 return PCH_PP_STATUS
;
587 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp
));
590 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
591 This function only applicable when panel PM state is not to be tracked */
592 static int edp_notify_handler(struct notifier_block
*this, unsigned long code
,
595 struct intel_dp
*intel_dp
= container_of(this, typeof(* intel_dp
),
597 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
598 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
600 u32 pp_ctrl_reg
, pp_div_reg
;
602 if (!is_edp(intel_dp
) || code
!= SYS_RESTART
)
607 if (IS_VALLEYVIEW(dev
)) {
608 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
610 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
611 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
612 pp_div
= I915_READ(pp_div_reg
);
613 pp_div
&= PP_REFERENCE_DIVIDER_MASK
;
615 /* 0x1F write to PP_DIV_REG sets max cycle delay */
616 I915_WRITE(pp_div_reg
, pp_div
| 0x1F);
617 I915_WRITE(pp_ctrl_reg
, PANEL_UNLOCK_REGS
| PANEL_POWER_OFF
);
618 msleep(intel_dp
->panel_power_cycle_delay
);
621 pps_unlock(intel_dp
);
626 static bool edp_have_panel_power(struct intel_dp
*intel_dp
)
628 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
629 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
631 lockdep_assert_held(&dev_priv
->pps_mutex
);
633 if (IS_VALLEYVIEW(dev
) &&
634 intel_dp
->pps_pipe
== INVALID_PIPE
)
637 return (I915_READ(_pp_stat_reg(intel_dp
)) & PP_ON
) != 0;
640 static bool edp_have_panel_vdd(struct intel_dp
*intel_dp
)
642 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
643 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
645 lockdep_assert_held(&dev_priv
->pps_mutex
);
647 if (IS_VALLEYVIEW(dev
) &&
648 intel_dp
->pps_pipe
== INVALID_PIPE
)
651 return I915_READ(_pp_ctrl_reg(intel_dp
)) & EDP_FORCE_VDD
;
655 intel_dp_check_edp(struct intel_dp
*intel_dp
)
657 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
658 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
660 if (!is_edp(intel_dp
))
663 if (!edp_have_panel_power(intel_dp
) && !edp_have_panel_vdd(intel_dp
)) {
664 WARN(1, "eDP powered off while attempting aux channel communication.\n");
665 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
666 I915_READ(_pp_stat_reg(intel_dp
)),
667 I915_READ(_pp_ctrl_reg(intel_dp
)));
672 intel_dp_aux_wait_done(struct intel_dp
*intel_dp
, bool has_aux_irq
)
674 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
675 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
676 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
677 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
681 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
683 done
= wait_event_timeout(dev_priv
->gmbus_wait_queue
, C
,
684 msecs_to_jiffies_timeout(10));
686 done
= wait_for_atomic(C
, 10) == 0;
688 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
/*
 * AUX clock divider for gen4-class hardware. Only a single divider is
 * supported, so any index > 0 returns 0 (end of list).
 */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
/*
 * AUX clock divider for ILK+ PCH platforms. Port A (the CPU eDP port)
 * derives its AUX clock from cdclk; the divide-by-2000 targets a 2MHz
 * AUX clock. Other ports run off the PCH raw clock. Only one divider
 * is provided (NOTE(review): index appears unused here — presumably a
 * single-entry list; confirm against the vfunc callers).
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
	}

	return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
}
724 static uint32_t hsw_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
726 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
727 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
728 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
730 if (intel_dig_port
->port
== PORT_A
) {
733 return DIV_ROUND_CLOSEST(dev_priv
->cdclk_freq
, 2000);
734 } else if (dev_priv
->pch_id
== INTEL_PCH_LPT_DEVICE_ID_TYPE
) {
735 /* Workaround for non-ULT HSW */
742 return index
? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev
), 2);
/* VLV uses a fixed AUX clock divider of 100; single-entry list
 * (index > 0 returns 0 to terminate). */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 * Returning 1 (not 0) for index 0 keeps the caller's divider loop
	 * running exactly once.
	 */
	return index ? 0 : 1;
}
761 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp
*intel_dp
,
764 uint32_t aux_clock_divider
)
766 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
767 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
768 uint32_t precharge
, timeout
;
775 if (IS_BROADWELL(dev
) && intel_dp
->aux_ch_ctl_reg
== DPA_AUX_CH_CTL
)
776 timeout
= DP_AUX_CH_CTL_TIME_OUT_600us
;
778 timeout
= DP_AUX_CH_CTL_TIME_OUT_400us
;
780 return DP_AUX_CH_CTL_SEND_BUSY
|
782 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
783 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
785 DP_AUX_CH_CTL_RECEIVE_ERROR
|
786 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
787 (precharge
<< DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT
) |
788 (aux_clock_divider
<< DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT
);
791 static uint32_t skl_get_aux_send_ctl(struct intel_dp
*intel_dp
,
796 return DP_AUX_CH_CTL_SEND_BUSY
|
798 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
799 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
800 DP_AUX_CH_CTL_TIME_OUT_1600us
|
801 DP_AUX_CH_CTL_RECEIVE_ERROR
|
802 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
803 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
807 intel_dp_aux_ch(struct intel_dp
*intel_dp
,
808 const uint8_t *send
, int send_bytes
,
809 uint8_t *recv
, int recv_size
)
811 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
812 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
813 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
814 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
815 uint32_t ch_data
= ch_ctl
+ 4;
816 uint32_t aux_clock_divider
;
817 int i
, ret
, recv_bytes
;
820 bool has_aux_irq
= HAS_AUX_IRQ(dev
);
826 * We will be called with VDD already enabled for dpcd/edid/oui reads.
827 * In such cases we want to leave VDD enabled and it's up to upper layers
828 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
831 vdd
= edp_panel_vdd_on(intel_dp
);
833 /* dp aux is extremely sensitive to irq latency, hence request the
834 * lowest possible wakeup latency and so prevent the cpu from going into
837 pm_qos_update_request(&dev_priv
->pm_qos
, 0);
839 intel_dp_check_edp(intel_dp
);
841 intel_aux_display_runtime_get(dev_priv
);
843 /* Try to wait for any previous AUX channel activity */
844 for (try = 0; try < 3; try++) {
845 status
= I915_READ_NOTRACE(ch_ctl
);
846 if ((status
& DP_AUX_CH_CTL_SEND_BUSY
) == 0)
852 static u32 last_status
= -1;
853 const u32 status
= I915_READ(ch_ctl
);
855 if (status
!= last_status
) {
856 WARN(1, "dp_aux_ch not started status 0x%08x\n",
858 last_status
= status
;
865 /* Only 5 data registers! */
866 if (WARN_ON(send_bytes
> 20 || recv_size
> 20)) {
871 while ((aux_clock_divider
= intel_dp
->get_aux_clock_divider(intel_dp
, clock
++))) {
872 u32 send_ctl
= intel_dp
->get_aux_send_ctl(intel_dp
,
877 /* Must try at least 3 times according to DP spec */
878 for (try = 0; try < 5; try++) {
879 /* Load the send data into the aux channel data registers */
880 for (i
= 0; i
< send_bytes
; i
+= 4)
881 I915_WRITE(ch_data
+ i
,
882 intel_dp_pack_aux(send
+ i
,
885 /* Send the command and wait for it to complete */
886 I915_WRITE(ch_ctl
, send_ctl
);
888 status
= intel_dp_aux_wait_done(intel_dp
, has_aux_irq
);
890 /* Clear done status and any errors */
894 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
895 DP_AUX_CH_CTL_RECEIVE_ERROR
);
897 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
)
900 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
901 * 400us delay required for errors and timeouts
902 * Timeout errors from the HW already meet this
903 * requirement so skip to next iteration
905 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
906 usleep_range(400, 500);
909 if (status
& DP_AUX_CH_CTL_DONE
)
914 if ((status
& DP_AUX_CH_CTL_DONE
) == 0) {
915 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status
);
921 /* Check for timeout or receive error.
922 * Timeouts occur when the sink is not connected
924 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
925 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status
);
930 /* Timeouts occur when the device isn't connected, so they're
931 * "normal" -- don't fill the kernel log with these */
932 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
) {
933 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status
);
938 /* Unload any bytes sent back from the other side */
939 recv_bytes
= ((status
& DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
) >>
940 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
);
941 if (recv_bytes
> recv_size
)
942 recv_bytes
= recv_size
;
944 for (i
= 0; i
< recv_bytes
; i
+= 4)
945 intel_dp_unpack_aux(I915_READ(ch_data
+ i
),
946 recv
+ i
, recv_bytes
- i
);
950 pm_qos_update_request(&dev_priv
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
951 intel_aux_display_runtime_put(dev_priv
);
954 edp_panel_vdd_off(intel_dp
, false);
956 pps_unlock(intel_dp
);
961 #define BARE_ADDRESS_SIZE 3
962 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
964 intel_dp_aux_transfer(struct drm_dp_aux
*aux
, struct drm_dp_aux_msg
*msg
)
966 struct intel_dp
*intel_dp
= container_of(aux
, struct intel_dp
, aux
);
967 uint8_t txbuf
[20], rxbuf
[20];
968 size_t txsize
, rxsize
;
971 txbuf
[0] = (msg
->request
<< 4) |
972 ((msg
->address
>> 16) & 0xf);
973 txbuf
[1] = (msg
->address
>> 8) & 0xff;
974 txbuf
[2] = msg
->address
& 0xff;
975 txbuf
[3] = msg
->size
- 1;
977 switch (msg
->request
& ~DP_AUX_I2C_MOT
) {
978 case DP_AUX_NATIVE_WRITE
:
979 case DP_AUX_I2C_WRITE
:
980 txsize
= msg
->size
? HEADER_SIZE
+ msg
->size
: BARE_ADDRESS_SIZE
;
981 rxsize
= 2; /* 0 or 1 data bytes */
983 if (WARN_ON(txsize
> 20))
986 memcpy(txbuf
+ HEADER_SIZE
, msg
->buffer
, msg
->size
);
988 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
990 msg
->reply
= rxbuf
[0] >> 4;
993 /* Number of bytes written in a short write. */
994 ret
= clamp_t(int, rxbuf
[1], 0, msg
->size
);
996 /* Return payload size. */
1002 case DP_AUX_NATIVE_READ
:
1003 case DP_AUX_I2C_READ
:
1004 txsize
= msg
->size
? HEADER_SIZE
: BARE_ADDRESS_SIZE
;
1005 rxsize
= msg
->size
+ 1;
1007 if (WARN_ON(rxsize
> 20))
1010 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
1012 msg
->reply
= rxbuf
[0] >> 4;
1014 * Assume happy day, and copy the data. The caller is
1015 * expected to check msg->reply before touching it.
1017 * Return payload size.
1020 memcpy(msg
->buffer
, rxbuf
+ 1, ret
);
1033 intel_dp_aux_init(struct intel_dp
*intel_dp
, struct intel_connector
*connector
)
1035 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1036 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1037 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1038 enum port port
= intel_dig_port
->port
;
1039 struct ddi_vbt_port_info
*info
= &dev_priv
->vbt
.ddi_port_info
[port
];
1040 const char *name
= NULL
;
1041 uint32_t porte_aux_ctl_reg
= DPA_AUX_CH_CTL
;
1044 /* On SKL we don't have Aux for port E so we rely on VBT to set
1045 * a proper alternate aux channel.
1047 if (IS_SKYLAKE(dev
) && port
== PORT_E
) {
1048 switch (info
->alternate_aux_channel
) {
1050 porte_aux_ctl_reg
= DPB_AUX_CH_CTL
;
1053 porte_aux_ctl_reg
= DPC_AUX_CH_CTL
;
1056 porte_aux_ctl_reg
= DPD_AUX_CH_CTL
;
1060 porte_aux_ctl_reg
= DPA_AUX_CH_CTL
;
1066 intel_dp
->aux_ch_ctl_reg
= DPA_AUX_CH_CTL
;
1070 intel_dp
->aux_ch_ctl_reg
= PCH_DPB_AUX_CH_CTL
;
1074 intel_dp
->aux_ch_ctl_reg
= PCH_DPC_AUX_CH_CTL
;
1078 intel_dp
->aux_ch_ctl_reg
= PCH_DPD_AUX_CH_CTL
;
1082 intel_dp
->aux_ch_ctl_reg
= porte_aux_ctl_reg
;
1090 * The AUX_CTL register is usually DP_CTL + 0x10.
1092 * On Haswell and Broadwell though:
1093 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1094 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1096 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1098 if (!IS_HASWELL(dev
) && !IS_BROADWELL(dev
) && port
!= PORT_E
)
1099 intel_dp
->aux_ch_ctl_reg
= intel_dp
->output_reg
+ 0x10;
1101 intel_dp
->aux
.name
= name
;
1102 intel_dp
->aux
.dev
= dev
->dev
;
1103 intel_dp
->aux
.transfer
= intel_dp_aux_transfer
;
1105 DRM_DEBUG_KMS("registering %s bus for %s\n", name
,
1106 connector
->base
.kdev
->kobj
.name
);
1108 ret
= drm_dp_aux_register(&intel_dp
->aux
);
1110 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1115 ret
= sysfs_create_link(&connector
->base
.kdev
->kobj
,
1116 &intel_dp
->aux
.ddc
.dev
.kobj
,
1117 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1119 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name
, ret
);
1120 drm_dp_aux_unregister(&intel_dp
->aux
);
/*
 * Connector unregister hook: remove the sysfs link to the AUX/DDC
 * device created at init time (MST ports never had one, so skip them),
 * then run the common connector unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1136 skl_edp_set_pll_config(struct intel_crtc_state
*pipe_config
)
1140 memset(&pipe_config
->dpll_hw_state
, 0,
1141 sizeof(pipe_config
->dpll_hw_state
));
1143 pipe_config
->ddi_pll_sel
= SKL_DPLL0
;
1144 pipe_config
->dpll_hw_state
.cfgcr1
= 0;
1145 pipe_config
->dpll_hw_state
.cfgcr2
= 0;
1147 ctrl1
= DPLL_CTRL1_OVERRIDE(SKL_DPLL0
);
1148 switch (pipe_config
->port_clock
/ 2) {
1150 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810
,
1154 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350
,
1158 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700
,
1162 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620
,
1165 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1166 results in CDCLK change. Need to handle the change of CDCLK by
1167 disabling pipes and re-enabling them */
1169 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080
,
1173 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160
,
1178 pipe_config
->dpll_hw_state
.ctrl1
= ctrl1
;
1182 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state
*pipe_config
)
1184 memset(&pipe_config
->dpll_hw_state
, 0,
1185 sizeof(pipe_config
->dpll_hw_state
));
1187 switch (pipe_config
->port_clock
/ 2) {
1189 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_810
;
1192 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_1350
;
1195 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_2700
;
/*
 * Point *sink_rates at the table of link rates the sink supports and
 * return the number of entries.
 *
 * If the sink reported an explicit rate table (num_sink_rates != 0),
 * use it. Otherwise fall back to the fixed default_rates table and
 * derive the usable count from the sink's max link bandwidth
 * (DP_LINK_BW_* codes step by 8, hence the ">> 3").
 */
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
/*
 * Point *source_rates at the table of link rates this platform can
 * drive and return the number of usable entries. BXT/SKL/CHV have
 * platform-specific tables; everything else uses default_rates, with
 * the usable count capped by the platform's max link bandwidth
 * (DP_LINK_BW_* codes step by 8, hence the ">> 3").
 */
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	/*
	 * NOTE(review): the IS_SKYLAKE branch below looks unreachable —
	 * SKL already returned above. Presumably leftover from before the
	 * skl_rates table existed; confirm and consider removing.
	 */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;

	return (DP_LINK_BW_2_7 >> 3) + 1;
}
1240 intel_dp_set_clock(struct intel_encoder
*encoder
,
1241 struct intel_crtc_state
*pipe_config
)
1243 struct drm_device
*dev
= encoder
->base
.dev
;
1244 const struct dp_link_dpll
*divisor
= NULL
;
1248 divisor
= gen4_dpll
;
1249 count
= ARRAY_SIZE(gen4_dpll
);
1250 } else if (HAS_PCH_SPLIT(dev
)) {
1252 count
= ARRAY_SIZE(pch_dpll
);
1253 } else if (IS_CHERRYVIEW(dev
)) {
1255 count
= ARRAY_SIZE(chv_dpll
);
1256 } else if (IS_VALLEYVIEW(dev
)) {
1258 count
= ARRAY_SIZE(vlv_dpll
);
1261 if (divisor
&& count
) {
1262 for (i
= 0; i
< count
; i
++) {
1263 if (pipe_config
->port_clock
== divisor
[i
].clock
) {
1264 pipe_config
->dpll
= divisor
[i
].dpll
;
1265 pipe_config
->clock_set
= true;
1272 static int intersect_rates(const int *source_rates
, int source_len
,
1273 const int *sink_rates
, int sink_len
,
1276 int i
= 0, j
= 0, k
= 0;
1278 while (i
< source_len
&& j
< sink_len
) {
1279 if (source_rates
[i
] == sink_rates
[j
]) {
1280 if (WARN_ON(k
>= DP_MAX_SUPPORTED_RATES
))
1282 common_rates
[k
] = source_rates
[i
];
1286 } else if (source_rates
[i
] < sink_rates
[j
]) {
/*
 * Compute the set of link rates supported by both source and sink.
 * common_rates must hold at least DP_MAX_SUPPORTED_RATES entries;
 * returns the number of entries filled in.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
                                 int *common_rates)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        const int *source_rates, *sink_rates;
        int source_len, sink_len;

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        source_len = intel_dp_source_rates(dev, &source_rates);

        return intersect_rates(source_rates, source_len,
                               sink_rates, sink_len,
                               common_rates);
}
/*
 * Format an int array as "a, b, c" into str (always NUL-terminated,
 * silently truncated if len is too small).
 *
 * Fix vs. original: snprintf() can return a negative value on error,
 * and its int result was compared against a size_t (signed/unsigned
 * mismatch).  Bail out explicitly on error and on truncation.
 */
static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
                /* stop on encoding error or once the output is truncated */
                if (r < 0 || (size_t)r >= len)
                        return;
                str += r;
                len -= r;
        }
}
1326 static void intel_dp_print_rates(struct intel_dp
*intel_dp
)
1328 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1329 const int *source_rates
, *sink_rates
;
1330 int source_len
, sink_len
, common_len
;
1331 int common_rates
[DP_MAX_SUPPORTED_RATES
];
1332 char str
[128]; /* FIXME: too big for stack? */
1334 if ((drm_debug
& DRM_UT_KMS
) == 0)
1337 source_len
= intel_dp_source_rates(dev
, &source_rates
);
1338 snprintf_int_array(str
, sizeof(str
), source_rates
, source_len
);
1339 DRM_DEBUG_KMS("source rates: %s\n", str
);
1341 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1342 snprintf_int_array(str
, sizeof(str
), sink_rates
, sink_len
);
1343 DRM_DEBUG_KMS("sink rates: %s\n", str
);
1345 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1346 snprintf_int_array(str
, sizeof(str
), common_rates
, common_len
);
1347 DRM_DEBUG_KMS("common rates: %s\n", str
);
1350 static int rate_to_index(int find
, const int *rates
)
1354 for (i
= 0; i
< DP_MAX_SUPPORTED_RATES
; ++i
)
1355 if (find
== rates
[i
])
1362 intel_dp_max_link_rate(struct intel_dp
*intel_dp
)
1364 int rates
[DP_MAX_SUPPORTED_RATES
] = {};
1367 len
= intel_dp_common_rates(intel_dp
, rates
);
1368 if (WARN_ON(len
<= 0))
1371 return rates
[rate_to_index(0, rates
) - 1];
1374 int intel_dp_rate_select(struct intel_dp
*intel_dp
, int rate
)
1376 return rate_to_index(rate
, intel_dp
->sink_rates
);
1379 static void intel_dp_compute_rate(struct intel_dp
*intel_dp
, int port_clock
,
1380 uint8_t *link_bw
, uint8_t *rate_select
)
1382 if (intel_dp
->num_sink_rates
) {
1385 intel_dp_rate_select(intel_dp
, port_clock
);
1387 *link_bw
= drm_dp_link_rate_to_bw_code(port_clock
);
1393 intel_dp_compute_config(struct intel_encoder
*encoder
,
1394 struct intel_crtc_state
*pipe_config
)
1396 struct drm_device
*dev
= encoder
->base
.dev
;
1397 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1398 struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
1399 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1400 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1401 struct intel_crtc
*intel_crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
1402 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
1403 int lane_count
, clock
;
1404 int min_lane_count
= 1;
1405 int max_lane_count
= intel_dp_max_lane_count(intel_dp
);
1406 /* Conveniently, the link BW constants become indices with a shift...*/
1410 int link_avail
, link_clock
;
1411 int common_rates
[DP_MAX_SUPPORTED_RATES
] = {};
1413 uint8_t link_bw
, rate_select
;
1415 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1417 /* No common link rates between source and sink */
1418 WARN_ON(common_len
<= 0);
1420 max_clock
= common_len
- 1;
1422 if (HAS_PCH_SPLIT(dev
) && !HAS_DDI(dev
) && port
!= PORT_A
)
1423 pipe_config
->has_pch_encoder
= true;
1425 pipe_config
->has_dp_encoder
= true;
1426 pipe_config
->has_drrs
= false;
1427 pipe_config
->has_audio
= intel_dp
->has_audio
&& port
!= PORT_A
;
1429 if (is_edp(intel_dp
) && intel_connector
->panel
.fixed_mode
) {
1430 intel_fixed_panel_mode(intel_connector
->panel
.fixed_mode
,
1433 if (INTEL_INFO(dev
)->gen
>= 9) {
1435 ret
= skl_update_scaler_crtc(pipe_config
);
1440 if (!HAS_PCH_SPLIT(dev
))
1441 intel_gmch_panel_fitting(intel_crtc
, pipe_config
,
1442 intel_connector
->panel
.fitting_mode
);
1444 intel_pch_panel_fitting(intel_crtc
, pipe_config
,
1445 intel_connector
->panel
.fitting_mode
);
1448 if (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
1451 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1452 "max bw %d pixel clock %iKHz\n",
1453 max_lane_count
, common_rates
[max_clock
],
1454 adjusted_mode
->crtc_clock
);
1456 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1457 * bpc in between. */
1458 bpp
= pipe_config
->pipe_bpp
;
1459 if (is_edp(intel_dp
)) {
1461 /* Get bpp from vbt only for panels that dont have bpp in edid */
1462 if (intel_connector
->base
.display_info
.bpc
== 0 &&
1463 (dev_priv
->vbt
.edp_bpp
&& dev_priv
->vbt
.edp_bpp
< bpp
)) {
1464 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1465 dev_priv
->vbt
.edp_bpp
);
1466 bpp
= dev_priv
->vbt
.edp_bpp
;
1470 * Use the maximum clock and number of lanes the eDP panel
1471 * advertizes being capable of. The panels are generally
1472 * designed to support only a single clock and lane
1473 * configuration, and typically these values correspond to the
1474 * native resolution of the panel.
1476 min_lane_count
= max_lane_count
;
1477 min_clock
= max_clock
;
1480 for (; bpp
>= 6*3; bpp
-= 2*3) {
1481 mode_rate
= intel_dp_link_required(adjusted_mode
->crtc_clock
,
1484 for (clock
= min_clock
; clock
<= max_clock
; clock
++) {
1485 for (lane_count
= min_lane_count
;
1486 lane_count
<= max_lane_count
;
1489 link_clock
= common_rates
[clock
];
1490 link_avail
= intel_dp_max_data_rate(link_clock
,
1493 if (mode_rate
<= link_avail
) {
1503 if (intel_dp
->color_range_auto
) {
1506 * CEA-861-E - 5.1 Default Encoding Parameters
1507 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1509 pipe_config
->limited_color_range
=
1510 bpp
!= 18 && drm_match_cea_mode(adjusted_mode
) > 1;
1512 pipe_config
->limited_color_range
=
1513 intel_dp
->limited_color_range
;
1516 pipe_config
->lane_count
= lane_count
;
1518 pipe_config
->pipe_bpp
= bpp
;
1519 pipe_config
->port_clock
= common_rates
[clock
];
1521 intel_dp_compute_rate(intel_dp
, pipe_config
->port_clock
,
1522 &link_bw
, &rate_select
);
1524 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1525 link_bw
, rate_select
, pipe_config
->lane_count
,
1526 pipe_config
->port_clock
, bpp
);
1527 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1528 mode_rate
, link_avail
);
1530 intel_link_compute_m_n(bpp
, lane_count
,
1531 adjusted_mode
->crtc_clock
,
1532 pipe_config
->port_clock
,
1533 &pipe_config
->dp_m_n
);
1535 if (intel_connector
->panel
.downclock_mode
!= NULL
&&
1536 dev_priv
->drrs
.type
== SEAMLESS_DRRS_SUPPORT
) {
1537 pipe_config
->has_drrs
= true;
1538 intel_link_compute_m_n(bpp
, lane_count
,
1539 intel_connector
->panel
.downclock_mode
->clock
,
1540 pipe_config
->port_clock
,
1541 &pipe_config
->dp_m2_n2
);
1544 if (IS_SKYLAKE(dev
) && is_edp(intel_dp
))
1545 skl_edp_set_pll_config(pipe_config
);
1546 else if (IS_BROXTON(dev
))
1547 /* handled in ddi */;
1548 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1549 hsw_dp_set_ddi_pll_sel(pipe_config
);
1551 intel_dp_set_clock(encoder
, pipe_config
);
1556 static void ironlake_set_pll_cpu_edp(struct intel_dp
*intel_dp
)
1558 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1559 struct intel_crtc
*crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
1560 struct drm_device
*dev
= crtc
->base
.dev
;
1561 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1564 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1565 crtc
->config
->port_clock
);
1566 dpa_ctl
= I915_READ(DP_A
);
1567 dpa_ctl
&= ~DP_PLL_FREQ_MASK
;
1569 if (crtc
->config
->port_clock
== 162000) {
1570 /* For a long time we've carried around a ILK-DevA w/a for the
1571 * 160MHz clock. If we're really unlucky, it's still required.
1573 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1574 dpa_ctl
|= DP_PLL_FREQ_160MHZ
;
1575 intel_dp
->DP
|= DP_PLL_FREQ_160MHZ
;
1577 dpa_ctl
|= DP_PLL_FREQ_270MHZ
;
1578 intel_dp
->DP
|= DP_PLL_FREQ_270MHZ
;
1581 I915_WRITE(DP_A
, dpa_ctl
);
1587 void intel_dp_set_link_params(struct intel_dp
*intel_dp
,
1588 const struct intel_crtc_state
*pipe_config
)
1590 intel_dp
->link_rate
= pipe_config
->port_clock
;
1591 intel_dp
->lane_count
= pipe_config
->lane_count
;
1594 static void intel_dp_prepare(struct intel_encoder
*encoder
)
1596 struct drm_device
*dev
= encoder
->base
.dev
;
1597 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1598 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1599 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1600 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
1601 struct drm_display_mode
*adjusted_mode
= &crtc
->config
->base
.adjusted_mode
;
1603 intel_dp_set_link_params(intel_dp
, crtc
->config
);
1606 * There are four kinds of DP registers:
1613 * IBX PCH and CPU are the same for almost everything,
1614 * except that the CPU DP PLL is configured in this
1617 * CPT PCH is quite different, having many bits moved
1618 * to the TRANS_DP_CTL register instead. That
1619 * configuration happens (oddly) in ironlake_pch_enable
1622 /* Preserve the BIOS-computed detected bit. This is
1623 * supposed to be read-only.
1625 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
1627 /* Handle DP bits in common between all three register formats */
1628 intel_dp
->DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
1629 intel_dp
->DP
|= DP_PORT_WIDTH(crtc
->config
->lane_count
);
1631 if (crtc
->config
->has_audio
)
1632 intel_dp
->DP
|= DP_AUDIO_OUTPUT_ENABLE
;
1634 /* Split out the IBX/CPU vs CPT settings */
1636 if (IS_GEN7(dev
) && port
== PORT_A
) {
1637 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1638 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1639 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1640 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1641 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1643 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1644 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1646 intel_dp
->DP
|= crtc
->pipe
<< 29;
1647 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
1650 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1652 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
1653 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1654 trans_dp
|= TRANS_DP_ENH_FRAMING
;
1656 trans_dp
&= ~TRANS_DP_ENH_FRAMING
;
1657 I915_WRITE(TRANS_DP_CTL(crtc
->pipe
), trans_dp
);
1659 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
1660 crtc
->config
->limited_color_range
)
1661 intel_dp
->DP
|= DP_COLOR_RANGE_16_235
;
1663 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1664 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1665 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1666 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1667 intel_dp
->DP
|= DP_LINK_TRAIN_OFF
;
1669 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1670 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1672 if (IS_CHERRYVIEW(dev
))
1673 intel_dp
->DP
|= DP_PIPE_SELECT_CHV(crtc
->pipe
);
1674 else if (crtc
->pipe
== PIPE_B
)
1675 intel_dp
->DP
|= DP_PIPEB_SELECT
;
/*
 * Panel power sequencer status mask/value pairs used by the
 * wait_panel_*() helpers below to poll PP_STATUS.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1688 static void wait_panel_status(struct intel_dp
*intel_dp
,
1692 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1693 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1694 u32 pp_stat_reg
, pp_ctrl_reg
;
1696 lockdep_assert_held(&dev_priv
->pps_mutex
);
1698 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1699 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1701 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1703 I915_READ(pp_stat_reg
),
1704 I915_READ(pp_ctrl_reg
));
1706 if (_wait_for((I915_READ(pp_stat_reg
) & mask
) == value
, 5000, 10)) {
1707 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1708 I915_READ(pp_stat_reg
),
1709 I915_READ(pp_ctrl_reg
));
1712 DRM_DEBUG_KMS("Wait complete\n");
1715 static void wait_panel_on(struct intel_dp
*intel_dp
)
1717 DRM_DEBUG_KMS("Wait for panel power on\n");
1718 wait_panel_status(intel_dp
, IDLE_ON_MASK
, IDLE_ON_VALUE
);
1721 static void wait_panel_off(struct intel_dp
*intel_dp
)
1723 DRM_DEBUG_KMS("Wait for panel power off time\n");
1724 wait_panel_status(intel_dp
, IDLE_OFF_MASK
, IDLE_OFF_VALUE
);
1727 static void wait_panel_power_cycle(struct intel_dp
*intel_dp
)
1729 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1731 /* When we disable the VDD override bit last we have to do the manual
1733 wait_remaining_ms_from_jiffies(intel_dp
->last_power_cycle
,
1734 intel_dp
->panel_power_cycle_delay
);
1736 wait_panel_status(intel_dp
, IDLE_CYCLE_MASK
, IDLE_CYCLE_VALUE
);
1739 static void wait_backlight_on(struct intel_dp
*intel_dp
)
1741 wait_remaining_ms_from_jiffies(intel_dp
->last_power_on
,
1742 intel_dp
->backlight_on_delay
);
1745 static void edp_wait_backlight_off(struct intel_dp
*intel_dp
)
1747 wait_remaining_ms_from_jiffies(intel_dp
->last_backlight_off
,
1748 intel_dp
->backlight_off_delay
);
1751 /* Read the current pp_control value, unlocking the register if it
1755 static u32
ironlake_get_pp_control(struct intel_dp
*intel_dp
)
1757 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1758 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1761 lockdep_assert_held(&dev_priv
->pps_mutex
);
1763 control
= I915_READ(_pp_ctrl_reg(intel_dp
));
1764 if (!IS_BROXTON(dev
)) {
1765 control
&= ~PANEL_UNLOCK_MASK
;
1766 control
|= PANEL_UNLOCK_REGS
;
1772 * Must be paired with edp_panel_vdd_off().
1773 * Must hold pps_mutex around the whole on/off sequence.
1774 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1776 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1778 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1779 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1780 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1781 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1782 enum intel_display_power_domain power_domain
;
1784 u32 pp_stat_reg
, pp_ctrl_reg
;
1785 bool need_to_disable
= !intel_dp
->want_panel_vdd
;
1787 lockdep_assert_held(&dev_priv
->pps_mutex
);
1789 if (!is_edp(intel_dp
))
1792 cancel_delayed_work(&intel_dp
->panel_vdd_work
);
1793 intel_dp
->want_panel_vdd
= true;
1795 if (edp_have_panel_vdd(intel_dp
))
1796 return need_to_disable
;
1798 power_domain
= intel_display_port_power_domain(intel_encoder
);
1799 intel_display_power_get(dev_priv
, power_domain
);
1801 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1802 port_name(intel_dig_port
->port
));
1804 if (!edp_have_panel_power(intel_dp
))
1805 wait_panel_power_cycle(intel_dp
);
1807 pp
= ironlake_get_pp_control(intel_dp
);
1808 pp
|= EDP_FORCE_VDD
;
1810 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1811 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1813 I915_WRITE(pp_ctrl_reg
, pp
);
1814 POSTING_READ(pp_ctrl_reg
);
1815 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1816 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1818 * If the panel wasn't on, delay before accessing aux channel
1820 if (!edp_have_panel_power(intel_dp
)) {
1821 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1822 port_name(intel_dig_port
->port
));
1823 msleep(intel_dp
->panel_power_up_delay
);
1826 return need_to_disable
;
1830 * Must be paired with intel_edp_panel_vdd_off() or
1831 * intel_edp_panel_off().
1832 * Nested calls to these functions are not allowed since
1833 * we drop the lock. Caller must use some higher level
1834 * locking to prevent nested calls from other threads.
1836 void intel_edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1840 if (!is_edp(intel_dp
))
1844 vdd
= edp_panel_vdd_on(intel_dp
);
1845 pps_unlock(intel_dp
);
1847 I915_STATE_WARN(!vdd
, "eDP port %c VDD already requested on\n",
1848 port_name(dp_to_dig_port(intel_dp
)->port
));
1851 static void edp_panel_vdd_off_sync(struct intel_dp
*intel_dp
)
1853 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1854 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1855 struct intel_digital_port
*intel_dig_port
=
1856 dp_to_dig_port(intel_dp
);
1857 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1858 enum intel_display_power_domain power_domain
;
1860 u32 pp_stat_reg
, pp_ctrl_reg
;
1862 lockdep_assert_held(&dev_priv
->pps_mutex
);
1864 WARN_ON(intel_dp
->want_panel_vdd
);
1866 if (!edp_have_panel_vdd(intel_dp
))
1869 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1870 port_name(intel_dig_port
->port
));
1872 pp
= ironlake_get_pp_control(intel_dp
);
1873 pp
&= ~EDP_FORCE_VDD
;
1875 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1876 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1878 I915_WRITE(pp_ctrl_reg
, pp
);
1879 POSTING_READ(pp_ctrl_reg
);
1881 /* Make sure sequencer is idle before allowing subsequent activity */
1882 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1883 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1885 if ((pp
& POWER_TARGET_ON
) == 0)
1886 intel_dp
->last_power_cycle
= jiffies
;
1888 power_domain
= intel_display_port_power_domain(intel_encoder
);
1889 intel_display_power_put(dev_priv
, power_domain
);
1892 static void edp_panel_vdd_work(struct work_struct
*__work
)
1894 struct intel_dp
*intel_dp
= container_of(to_delayed_work(__work
),
1895 struct intel_dp
, panel_vdd_work
);
1898 if (!intel_dp
->want_panel_vdd
)
1899 edp_panel_vdd_off_sync(intel_dp
);
1900 pps_unlock(intel_dp
);
1903 static void edp_panel_vdd_schedule_off(struct intel_dp
*intel_dp
)
1905 unsigned long delay
;
1908 * Queue the timer to fire a long time from now (relative to the power
1909 * down delay) to keep the panel power up across a sequence of
1912 delay
= msecs_to_jiffies(intel_dp
->panel_power_cycle_delay
* 5);
1913 schedule_delayed_work(&intel_dp
->panel_vdd_work
, delay
);
1917 * Must be paired with edp_panel_vdd_on().
1918 * Must hold pps_mutex around the whole on/off sequence.
1919 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1921 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
)
1923 struct drm_i915_private
*dev_priv
=
1924 intel_dp_to_dev(intel_dp
)->dev_private
;
1926 lockdep_assert_held(&dev_priv
->pps_mutex
);
1928 if (!is_edp(intel_dp
))
1931 I915_STATE_WARN(!intel_dp
->want_panel_vdd
, "eDP port %c VDD not forced on",
1932 port_name(dp_to_dig_port(intel_dp
)->port
));
1934 intel_dp
->want_panel_vdd
= false;
1937 edp_panel_vdd_off_sync(intel_dp
);
1939 edp_panel_vdd_schedule_off(intel_dp
);
1942 static void edp_panel_on(struct intel_dp
*intel_dp
)
1944 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1945 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1949 lockdep_assert_held(&dev_priv
->pps_mutex
);
1951 if (!is_edp(intel_dp
))
1954 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1955 port_name(dp_to_dig_port(intel_dp
)->port
));
1957 if (WARN(edp_have_panel_power(intel_dp
),
1958 "eDP port %c panel power already on\n",
1959 port_name(dp_to_dig_port(intel_dp
)->port
)))
1962 wait_panel_power_cycle(intel_dp
);
1964 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1965 pp
= ironlake_get_pp_control(intel_dp
);
1967 /* ILK workaround: disable reset around power sequence */
1968 pp
&= ~PANEL_POWER_RESET
;
1969 I915_WRITE(pp_ctrl_reg
, pp
);
1970 POSTING_READ(pp_ctrl_reg
);
1973 pp
|= POWER_TARGET_ON
;
1975 pp
|= PANEL_POWER_RESET
;
1977 I915_WRITE(pp_ctrl_reg
, pp
);
1978 POSTING_READ(pp_ctrl_reg
);
1980 wait_panel_on(intel_dp
);
1981 intel_dp
->last_power_on
= jiffies
;
1984 pp
|= PANEL_POWER_RESET
; /* restore panel reset bit */
1985 I915_WRITE(pp_ctrl_reg
, pp
);
1986 POSTING_READ(pp_ctrl_reg
);
/* Public wrapper: turn eDP panel power on under pps_mutex. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
2001 static void edp_panel_off(struct intel_dp
*intel_dp
)
2003 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2004 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
2005 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2006 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2007 enum intel_display_power_domain power_domain
;
2011 lockdep_assert_held(&dev_priv
->pps_mutex
);
2013 if (!is_edp(intel_dp
))
2016 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2017 port_name(dp_to_dig_port(intel_dp
)->port
));
2019 WARN(!intel_dp
->want_panel_vdd
, "Need eDP port %c VDD to turn off panel\n",
2020 port_name(dp_to_dig_port(intel_dp
)->port
));
2022 pp
= ironlake_get_pp_control(intel_dp
);
2023 /* We need to switch off panel power _and_ force vdd, for otherwise some
2024 * panels get very unhappy and cease to work. */
2025 pp
&= ~(POWER_TARGET_ON
| PANEL_POWER_RESET
| EDP_FORCE_VDD
|
2028 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2030 intel_dp
->want_panel_vdd
= false;
2032 I915_WRITE(pp_ctrl_reg
, pp
);
2033 POSTING_READ(pp_ctrl_reg
);
2035 intel_dp
->last_power_cycle
= jiffies
;
2036 wait_panel_off(intel_dp
);
2038 /* We got a reference when we enabled the VDD. */
2039 power_domain
= intel_display_port_power_domain(intel_encoder
);
2040 intel_display_power_put(dev_priv
, power_domain
);
/* Public wrapper: turn eDP panel power off under pps_mutex. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2053 /* Enable backlight in the panel power control. */
2054 static void _intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2056 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2057 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2058 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2063 * If we enable the backlight right away following a panel power
2064 * on, we may see slight flicker as the panel syncs with the eDP
2065 * link. So delay a bit to make sure the image is solid before
2066 * allowing it to appear.
2068 wait_backlight_on(intel_dp
);
2072 pp
= ironlake_get_pp_control(intel_dp
);
2073 pp
|= EDP_BLC_ENABLE
;
2075 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2077 I915_WRITE(pp_ctrl_reg
, pp
);
2078 POSTING_READ(pp_ctrl_reg
);
2080 pps_unlock(intel_dp
);
2083 /* Enable backlight PWM and backlight PP control. */
2084 void intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2086 if (!is_edp(intel_dp
))
2089 DRM_DEBUG_KMS("\n");
2091 intel_panel_enable_backlight(intel_dp
->attached_connector
);
2092 _intel_edp_backlight_on(intel_dp
);
2095 /* Disable backlight in the panel power control. */
2096 static void _intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2098 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2099 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2103 if (!is_edp(intel_dp
))
2108 pp
= ironlake_get_pp_control(intel_dp
);
2109 pp
&= ~EDP_BLC_ENABLE
;
2111 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2113 I915_WRITE(pp_ctrl_reg
, pp
);
2114 POSTING_READ(pp_ctrl_reg
);
2116 pps_unlock(intel_dp
);
2118 intel_dp
->last_backlight_off
= jiffies
;
2119 edp_wait_backlight_off(intel_dp
);
2122 /* Disable backlight PP control and backlight PWM. */
2123 void intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2125 if (!is_edp(intel_dp
))
2128 DRM_DEBUG_KMS("\n");
2130 _intel_edp_backlight_off(intel_dp
);
2131 intel_panel_disable_backlight(intel_dp
->attached_connector
);
2135 * Hook for controlling the panel power control backlight through the bl_power
2136 * sysfs attribute. Take care to handle multiple calls.
2138 static void intel_edp_backlight_power(struct intel_connector
*connector
,
2141 struct intel_dp
*intel_dp
= intel_attached_dp(&connector
->base
);
2145 is_enabled
= ironlake_get_pp_control(intel_dp
) & EDP_BLC_ENABLE
;
2146 pps_unlock(intel_dp
);
2148 if (is_enabled
== enable
)
2151 DRM_DEBUG_KMS("panel power control backlight %s\n",
2152 enable
? "enable" : "disable");
2155 _intel_edp_backlight_on(intel_dp
);
2157 _intel_edp_backlight_off(intel_dp
);
2160 static void ironlake_edp_pll_on(struct intel_dp
*intel_dp
)
2162 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2163 struct drm_crtc
*crtc
= intel_dig_port
->base
.base
.crtc
;
2164 struct drm_device
*dev
= crtc
->dev
;
2165 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2168 assert_pipe_disabled(dev_priv
,
2169 to_intel_crtc(crtc
)->pipe
);
2171 DRM_DEBUG_KMS("\n");
2172 dpa_ctl
= I915_READ(DP_A
);
2173 WARN(dpa_ctl
& DP_PLL_ENABLE
, "dp pll on, should be off\n");
2174 WARN(dpa_ctl
& DP_PORT_EN
, "dp port still on, should be off\n");
2176 /* We don't adjust intel_dp->DP while tearing down the link, to
2177 * facilitate link retraining (e.g. after hotplug). Hence clear all
2178 * enable bits here to ensure that we don't enable too much. */
2179 intel_dp
->DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
2180 intel_dp
->DP
|= DP_PLL_ENABLE
;
2181 I915_WRITE(DP_A
, intel_dp
->DP
);
2186 static void ironlake_edp_pll_off(struct intel_dp
*intel_dp
)
2188 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2189 struct drm_crtc
*crtc
= intel_dig_port
->base
.base
.crtc
;
2190 struct drm_device
*dev
= crtc
->dev
;
2191 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2194 assert_pipe_disabled(dev_priv
,
2195 to_intel_crtc(crtc
)->pipe
);
2197 dpa_ctl
= I915_READ(DP_A
);
2198 WARN((dpa_ctl
& DP_PLL_ENABLE
) == 0,
2199 "dp pll off, should be on\n");
2200 WARN(dpa_ctl
& DP_PORT_EN
, "dp port still on, should be off\n");
2202 /* We can't rely on the value tracked for the DP register in
2203 * intel_dp->DP because link_down must not change that (otherwise link
2204 * re-training will fail. */
2205 dpa_ctl
&= ~DP_PLL_ENABLE
;
2206 I915_WRITE(DP_A
, dpa_ctl
);
2211 /* If the sink supports it, try to set the power state appropriately */
2212 void intel_dp_sink_dpms(struct intel_dp
*intel_dp
, int mode
)
2216 /* Should have a valid DPCD by this point */
2217 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x11)
2220 if (mode
!= DRM_MODE_DPMS_ON
) {
2221 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2225 * When turning on, we need to retry for 1ms to give the sink
2228 for (i
= 0; i
< 3; i
++) {
2229 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2238 DRM_DEBUG_KMS("failed to %s sink power state\n",
2239 mode
== DRM_MODE_DPMS_ON
? "enable" : "disable");
2242 static bool intel_dp_get_hw_state(struct intel_encoder
*encoder
,
2245 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2246 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2247 struct drm_device
*dev
= encoder
->base
.dev
;
2248 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2249 enum intel_display_power_domain power_domain
;
2252 power_domain
= intel_display_port_power_domain(encoder
);
2253 if (!intel_display_power_is_enabled(dev_priv
, power_domain
))
2256 tmp
= I915_READ(intel_dp
->output_reg
);
2258 if (!(tmp
& DP_PORT_EN
))
2261 if (IS_GEN7(dev
) && port
== PORT_A
) {
2262 *pipe
= PORT_TO_PIPE_CPT(tmp
);
2263 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2266 for_each_pipe(dev_priv
, p
) {
2267 u32 trans_dp
= I915_READ(TRANS_DP_CTL(p
));
2268 if (TRANS_DP_PIPE_TO_PORT(trans_dp
) == port
) {
2274 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2275 intel_dp
->output_reg
);
2276 } else if (IS_CHERRYVIEW(dev
)) {
2277 *pipe
= DP_PORT_TO_PIPE_CHV(tmp
);
2279 *pipe
= PORT_TO_PIPE(tmp
);
2285 static void intel_dp_get_config(struct intel_encoder
*encoder
,
2286 struct intel_crtc_state
*pipe_config
)
2288 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2290 struct drm_device
*dev
= encoder
->base
.dev
;
2291 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2292 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2293 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2296 tmp
= I915_READ(intel_dp
->output_reg
);
2298 pipe_config
->has_audio
= tmp
& DP_AUDIO_OUTPUT_ENABLE
&& port
!= PORT_A
;
2300 if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2301 u32 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
2303 if (trans_dp
& TRANS_DP_HSYNC_ACTIVE_HIGH
)
2304 flags
|= DRM_MODE_FLAG_PHSYNC
;
2306 flags
|= DRM_MODE_FLAG_NHSYNC
;
2308 if (trans_dp
& TRANS_DP_VSYNC_ACTIVE_HIGH
)
2309 flags
|= DRM_MODE_FLAG_PVSYNC
;
2311 flags
|= DRM_MODE_FLAG_NVSYNC
;
2313 if (tmp
& DP_SYNC_HS_HIGH
)
2314 flags
|= DRM_MODE_FLAG_PHSYNC
;
2316 flags
|= DRM_MODE_FLAG_NHSYNC
;
2318 if (tmp
& DP_SYNC_VS_HIGH
)
2319 flags
|= DRM_MODE_FLAG_PVSYNC
;
2321 flags
|= DRM_MODE_FLAG_NVSYNC
;
2324 pipe_config
->base
.adjusted_mode
.flags
|= flags
;
2326 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
2327 tmp
& DP_COLOR_RANGE_16_235
)
2328 pipe_config
->limited_color_range
= true;
2330 pipe_config
->has_dp_encoder
= true;
2332 pipe_config
->lane_count
=
2333 ((tmp
& DP_PORT_WIDTH_MASK
) >> DP_PORT_WIDTH_SHIFT
) + 1;
2335 intel_dp_get_m_n(crtc
, pipe_config
);
2337 if (port
== PORT_A
) {
2338 if ((I915_READ(DP_A
) & DP_PLL_FREQ_MASK
) == DP_PLL_FREQ_160MHZ
)
2339 pipe_config
->port_clock
= 162000;
2341 pipe_config
->port_clock
= 270000;
2344 dotclock
= intel_dotclock_calculate(pipe_config
->port_clock
,
2345 &pipe_config
->dp_m_n
);
2347 if (HAS_PCH_SPLIT(dev_priv
->dev
) && port
!= PORT_A
)
2348 ironlake_check_encoder_dotclock(pipe_config
, dotclock
);
2350 pipe_config
->base
.adjusted_mode
.crtc_clock
= dotclock
;
2352 if (is_edp(intel_dp
) && dev_priv
->vbt
.edp_bpp
&&
2353 pipe_config
->pipe_bpp
> dev_priv
->vbt
.edp_bpp
) {
2355 * This is a big fat ugly hack.
2357 * Some machines in UEFI boot mode provide us a VBT that has 18
2358 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2359 * unknown we fail to light up. Yet the same BIOS boots up with
2360 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2361 * max, not what it tells us to use.
2363 * Note: This will still be broken if the eDP panel is not lit
2364 * up by the BIOS, and thus we can't get the mode at module
2367 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2368 pipe_config
->pipe_bpp
, dev_priv
->vbt
.edp_bpp
);
2369 dev_priv
->vbt
.edp_bpp
= pipe_config
->pipe_bpp
;
/*
 * Disable the DP encoder: audio off, PSR off (non-DDI), panel power-down
 * sequence, and (on gen < 5) the port itself before the pipe goes down.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	/* DDI platforms handle PSR in the DDI disable path instead */
	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
/*
 * ILK post-disable: take the link down after the pipe is off, and turn
 * off the CPU eDP PLL for port A.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);

	/* Only port A uses the CPU eDP PLL on ilk+ */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
/* VLV post-disable: just bring the link down once the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
/*
 * CHV post-disable: bring the link down, then put the PHY data lanes
 * into reset via the DPIO sideband (both PCS01 and PCS23 lane groups).
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Assert the lane resets (bits cleared == lanes in reset) */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * Program the requested DP training pattern into the hardware.
 *
 * Three register layouts are handled:
 *  - DDI platforms: pattern lives in DP_TP_CTL (written immediately);
 *  - gen7 port A / CPT PCH ports: the _CPT variant of the bits in *DP;
 *  - everything else (incl. CHV): the classic bits in *DP.
 * For the non-DDI paths only *DP is updated; the caller writes the
 * port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Hardware has no TPS3 encoding here; fall back to TPS2 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				/* Only CHV supports TPS3 in this layout */
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
/*
 * Enable the DP port with training pattern 1 pre-loaded, using the
 * two-step write sequence VLV/CHV require.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
/*
 * Common DP enable: power sequencer setup, port enable, panel power-on,
 * link training, and audio enable.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	unsigned int lane_mask = 0x0;

	/* The port must still be disabled at this point */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Hold vdd across the panel power-on, then release it */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
/* g4x enable hook: common DP enable, then eDP backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
/*
 * VLV enable hook: the port itself is enabled in the pre_enable hook,
 * so only backlight and PSR remain here.
 */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}
/*
 * g4x/ilk pre-enable: program the port register and, for CPU eDP on
 * port A, set up and turn on the eDP PLL before the pipe starts.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	intel_dp_prepare(encoder);

	/* Only ilk+ has port A */
	if (dport->port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
/*
 * Logically disconnect the power sequencer currently assigned to this
 * eDP port: sync off vdd, clear the PPS port select, and mark the
 * pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* vdd must be off before we release the sequencer */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power seqeuencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power seqeuencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
/*
 * Steal the power sequencer of @pipe from whichever eDP encoder is
 * currently using it, detaching it first so vdd is safely off.
 * Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VLV/CHV only have PPS instances for pipes A and B */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from an active port indicates a logic bug */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
/*
 * Bind the power sequencer of the current crtc's pipe to this eDP port,
 * stealing it from another port if necessary, and (re)initialize the
 * PPS registers. Caller must hold pps_mutex. No-op for non-eDP.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already using the right sequencer - nothing to do */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
/*
 * VLV pre-enable: program the PHY clock channels and lane clock via the
 * DPIO sideband, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Enable clock channels for this port. NOTE(review): the read
	 * result is discarded and the value rebuilt from scratch; the
	 * bit-21 pipe select and 0x001000c4 are magic from the original
	 * code - confirm against VLV DPIO documentation.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);

	/* Program lane clock */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
/*
 * VLV pre-PLL-enable: program the port register, reset the Tx lanes to
 * defaults, and apply the inter-pair skew workaround - all before the
 * DPLL is turned on.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
		       DPIO_PCS_TX_LANE2_RESET |
		       DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
		       DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * CHV pre-enable: full PHY lane bring-up over the DPIO sideband -
 * TX FIFO reset handling, data lane reset deassert, per-lane latency
 * ("upar") setting, and link-rate-dependent lane stagger - then the
 * common DP enable sequence.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the upar bit */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
			       data << DPIO_UPAR_SHIFT);
	}

	/*
	 * Data lane stagger programming.
	 * NOTE(review): the per-rate stagger constants below were lost in
	 * extraction and restored from the upstream driver - confirm
	 * against the original source.
	 */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(7) |
		       DPIO_TX2_STAGGER_MULT(5));

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
/*
 * CHV pre-PLL-enable: program the port register, then set up left/right
 * clock buffer distribution and clock channel usage for this channel
 * before the PLL comes up.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * program left/right clock distribution.
	 * NOTE(review): the per-channel conditionals below were lost in
	 * extraction and restored from the upstream driver (force the
	 * left buffer for CH0, the right buffer for CH1) - confirm.
	 */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This a a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * CHV post-PLL-disable: tear down the left/right clock buffer
 * distribution that chv_dp_pre_pll_enable() set up.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 *
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
 * supposed to retry 3 times per the spec.
 */
static ssize_t
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
			void *buffer, size_t size)
{
	ssize_t ret;
	int i;

	/*
	 * Sometime we just get the same incorrect byte repeated
	 * over the entire buffer. Doing just one throw away read
	 * initially seems to "solve" it.
	 */
	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);

	for (i = 0; i < 3; i++) {
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
		if (ret == size)
			return ret;
		msleep(1);
	}

	/* Return the last (short or error) result after 3 attempts */
	return ret;
}
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information.
 * Returns true only if the full status block was read.
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp,
			 uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
				       DP_LANE0_1_STATUS,
				       link_status,
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
/* These are source-specific values. */
/*
 * Maximum voltage swing level the source supports for this port,
 * as a DP_TRAIN_VOLTAGE_SWING_LEVEL_* value.
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* Low-vswing eDP panels on port A get the full range */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
/*
 * Maximum pre-emphasis level the source supports at the given voltage
 * swing, as a DP_TRAIN_PRE_EMPH_LEVEL_* value. Higher swing generally
 * allows less pre-emphasis headroom.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
/*
 * Translate the DP training pre-emphasis/voltage-swing request into VLV
 * PHY register values and program them via the DPIO sideband.
 * Unsupported combinations bail out early with 0 and program nothing.
 * Always returns 0 (the port register needs no signal level bits on VLV).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);
	/* TX_DW5 is toggled off/on around the reprogramming */
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
		       uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3204 static bool chv_need_uniq_trans_scale(uint8_t train_set
)
3206 return (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) == DP_TRAIN_PRE_EMPH_LEVEL_0
&&
3207 (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
/*
 * Translate the DP training pre-emphasis/voltage-swing request into CHV
 * PHY de-emphasis/margin values and program the full swing-calculation
 * sequence over the DPIO sideband. Unsupported combinations bail out
 * early with 0 and program nothing. Always returns 0 (no signal level
 * bits needed in the port register on CHV).
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
/*
 * Compute the next train_set from the sink's per-lane adjust requests:
 * take the maximum requested voltage swing and pre-emphasis over all
 * active lanes, clamp both to the source's maxima (setting the
 * MAX_*_REACHED flags), and apply the result to all four lanes.
 */
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}
/*
 * Translate the training voltage-swing/pre-emphasis request into the
 * gen4 DP port register bits. Unknown levels fall back to the lowest
 * setting.
 */
static uint32_t
gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}
3447 /* Gen6's DP voltage swing and pre-emphasis control */
/*
 * gen6_edp_signal_levels - map the combined (vswing | pre-emphasis)
 * train_set request onto the fixed SNB eDP link-training register values.
 * Unsupported combinations log a debug message and fall back to the
 * 400/600mV 0dB setting.
 * NOTE(review): corrupted extraction -- statements split across lines,
 * some original lines dropped. Code text kept byte-identical.
 */
3449 gen6_edp_signal_levels(uint8_t train_set
)
/* Keep only the two fields the lookup switches on. */
3451 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3452 DP_TRAIN_PRE_EMPHASIS_MASK
);
3453 switch (signal_levels
) {
3454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3456 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3458 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B
;
3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3461 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B
;
3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3464 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B
;
3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3467 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B
;
/* Unsupported combination: warn and use the safest setting. */
3469 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3470 "0x%x\n", signal_levels
);
3471 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3475 /* Gen7's DP voltage swing and pre-emphasis control */
/*
 * gen7_edp_signal_levels - map the combined (vswing | pre-emphasis)
 * train_set request onto IVB eDP link-training register values.
 * Unsupported combinations log a debug message and fall back to
 * 500mV 0dB.
 * NOTE(review): corrupted extraction -- statements split across lines,
 * some original lines dropped. Code text kept byte-identical.
 */
3477 gen7_edp_signal_levels(uint8_t train_set
)
/* Keep only the two fields the lookup switches on. */
3479 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3480 DP_TRAIN_PRE_EMPHASIS_MASK
);
3481 switch (signal_levels
) {
3482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3483 return EDP_LINK_TRAIN_400MV_0DB_IVB
;
3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3485 return EDP_LINK_TRAIN_400MV_3_5DB_IVB
;
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3487 return EDP_LINK_TRAIN_400MV_6DB_IVB
;
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3490 return EDP_LINK_TRAIN_600MV_0DB_IVB
;
3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3492 return EDP_LINK_TRAIN_600MV_3_5DB_IVB
;
3494 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3495 return EDP_LINK_TRAIN_800MV_0DB_IVB
;
3496 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3497 return EDP_LINK_TRAIN_800MV_3_5DB_IVB
;
/* Unsupported combination: warn and use the 500mV 0dB fallback. */
3500 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3501 "0x%x\n", signal_levels
);
3502 return EDP_LINK_TRAIN_500MV_0DB_IVB
;
3506 /* Properly updates "DP" with the correct signal levels. */
3508 intel_dp_set_signal_levels(struct intel_dp
*intel_dp
, uint32_t *DP
)
3510 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3511 enum port port
= intel_dig_port
->port
;
3512 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3513 uint32_t signal_levels
, mask
= 0;
3514 uint8_t train_set
= intel_dp
->train_set
[0];
3517 signal_levels
= ddi_signal_levels(intel_dp
);
3519 if (IS_BROXTON(dev
))
3522 mask
= DDI_BUF_EMP_MASK
;
3523 } else if (IS_CHERRYVIEW(dev
)) {
3524 signal_levels
= chv_signal_levels(intel_dp
);
3525 } else if (IS_VALLEYVIEW(dev
)) {
3526 signal_levels
= vlv_signal_levels(intel_dp
);
3527 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3528 signal_levels
= gen7_edp_signal_levels(train_set
);
3529 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_IVB
;
3530 } else if (IS_GEN6(dev
) && port
== PORT_A
) {
3531 signal_levels
= gen6_edp_signal_levels(train_set
);
3532 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_SNB
;
3534 signal_levels
= gen4_signal_levels(train_set
);
3535 mask
= DP_VOLTAGE_MASK
| DP_PRE_EMPHASIS_MASK
;
3539 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels
);
3541 DRM_DEBUG_KMS("Using vswing level %d\n",
3542 train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
);
3543 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3544 (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) >>
3545 DP_TRAIN_PRE_EMPHASIS_SHIFT
);
3547 *DP
= (*DP
& ~mask
) | signal_levels
;
3551 intel_dp_set_link_train(struct intel_dp
*intel_dp
,
3553 uint8_t dp_train_pat
)
3555 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3556 struct drm_i915_private
*dev_priv
=
3557 to_i915(intel_dig_port
->base
.base
.dev
);
3558 uint8_t buf
[sizeof(intel_dp
->train_set
) + 1];
3561 _intel_dp_set_link_train(intel_dp
, DP
, dp_train_pat
);
3563 I915_WRITE(intel_dp
->output_reg
, *DP
);
3564 POSTING_READ(intel_dp
->output_reg
);
3566 buf
[0] = dp_train_pat
;
3567 if ((dp_train_pat
& DP_TRAINING_PATTERN_MASK
) ==
3568 DP_TRAINING_PATTERN_DISABLE
) {
3569 /* don't write DP_TRAINING_LANEx_SET on disable */
3572 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3573 memcpy(buf
+ 1, intel_dp
->train_set
, intel_dp
->lane_count
);
3574 len
= intel_dp
->lane_count
+ 1;
3577 ret
= drm_dp_dpcd_write(&intel_dp
->aux
, DP_TRAINING_PATTERN_SET
,
/*
 * intel_dp_reset_link_train - (re)start link training from scratch.
 * Clears the cached per-lane train_set (unless we are deliberately
 * reusing previously-trained values, i.e. train_set_valid is set),
 * programs the resulting signal levels, then writes the requested
 * training pattern to the port and the sink.
 * NOTE(review): corrupted extraction -- statements split across lines.
 * Code text kept byte-identical.
 */
3584 intel_dp_reset_link_train(struct intel_dp
*intel_dp
, uint32_t *DP
,
3585 uint8_t dp_train_pat
)
/* Only wipe the train set when not reusing cached training values. */
3587 if (!intel_dp
->train_set_valid
)
3588 memset(intel_dp
->train_set
, 0, sizeof(intel_dp
->train_set
));
3589 intel_dp_set_signal_levels(intel_dp
, DP
);
/* Propagate success/failure of programming the training pattern. */
3590 return intel_dp_set_link_train(intel_dp
, DP
, dp_train_pat
);
/*
 * intel_dp_update_link_train - apply the sink's adjustment request.
 * Recomputes train_set from link_status, reprograms the source signal
 * levels into *DP and the output register, then writes the per-lane
 * training values to the sink via DPCD TRAINING_LANE0_SET.
 * Returns true when all lane_count bytes were written successfully.
 * NOTE(review): corrupted extraction -- statements split across lines,
 * the declaration of "ret" was dropped. Code text kept byte-identical.
 */
3594 intel_dp_update_link_train(struct intel_dp
*intel_dp
, uint32_t *DP
,
3595 const uint8_t link_status
[DP_LINK_STATUS_SIZE
])
3597 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3598 struct drm_i915_private
*dev_priv
=
3599 to_i915(intel_dig_port
->base
.base
.dev
);
/* Fold the sink's requested vswing/pre-emphasis into train_set. */
3602 intel_get_adjust_train(intel_dp
, link_status
);
3603 intel_dp_set_signal_levels(intel_dp
, DP
);
/* Latch the new levels on the source side before telling the sink. */
3605 I915_WRITE(intel_dp
->output_reg
, *DP
);
3606 POSTING_READ(intel_dp
->output_reg
);
/* Push the per-lane training values to the sink over AUX. */
3608 ret
= drm_dp_dpcd_write(&intel_dp
->aux
, DP_TRAINING_LANE0_SET
,
3609 intel_dp
->train_set
, intel_dp
->lane_count
);
/* drm_dp_dpcd_write returns bytes written; all lanes must succeed. */
3611 return ret
== intel_dp
->lane_count
;
3614 static void intel_dp_set_idle_link_train(struct intel_dp
*intel_dp
)
3616 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3617 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3618 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3619 enum port port
= intel_dig_port
->port
;
3625 val
= I915_READ(DP_TP_CTL(port
));
3626 val
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3627 val
|= DP_TP_CTL_LINK_TRAIN_IDLE
;
3628 I915_WRITE(DP_TP_CTL(port
), val
);
3631 * On PORT_A we can have only eDP in SST mode. There the only reason
3632 * we need to set idle transmission mode is to work around a HW issue
3633 * where we enable the pipe while not in idle link-training mode.
3634 * In this case there is requirement to wait for a minimum number of
3635 * idle patterns to be sent.
3640 if (wait_for((I915_READ(DP_TP_STATUS(port
)) & DP_TP_STATUS_IDLE_DONE
),
3642 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3645 /* Enable corresponding port and start training pattern 1 */
3647 intel_dp_start_link_train(struct intel_dp
*intel_dp
)
3649 struct drm_encoder
*encoder
= &dp_to_dig_port(intel_dp
)->base
.base
;
3650 struct drm_device
*dev
= encoder
->dev
;
3653 int voltage_tries
, loop_tries
;
3654 uint32_t DP
= intel_dp
->DP
;
3655 uint8_t link_config
[2];
3656 uint8_t link_bw
, rate_select
;
3659 intel_ddi_prepare_link_retrain(encoder
);
3661 intel_dp_compute_rate(intel_dp
, intel_dp
->link_rate
,
3662 &link_bw
, &rate_select
);
3664 /* Write the link configuration data */
3665 link_config
[0] = link_bw
;
3666 link_config
[1] = intel_dp
->lane_count
;
3667 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
3668 link_config
[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN
;
3669 drm_dp_dpcd_write(&intel_dp
->aux
, DP_LINK_BW_SET
, link_config
, 2);
3670 if (intel_dp
->num_sink_rates
)
3671 drm_dp_dpcd_write(&intel_dp
->aux
, DP_LINK_RATE_SET
,
3675 link_config
[1] = DP_SET_ANSI_8B10B
;
3676 drm_dp_dpcd_write(&intel_dp
->aux
, DP_DOWNSPREAD_CTRL
, link_config
, 2);
3680 /* clock recovery */
3681 if (!intel_dp_reset_link_train(intel_dp
, &DP
,
3682 DP_TRAINING_PATTERN_1
|
3683 DP_LINK_SCRAMBLING_DISABLE
)) {
3684 DRM_ERROR("failed to enable link training\n");
3692 uint8_t link_status
[DP_LINK_STATUS_SIZE
];
3694 drm_dp_link_train_clock_recovery_delay(intel_dp
->dpcd
);
3695 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
3696 DRM_ERROR("failed to get link status\n");
3700 if (drm_dp_clock_recovery_ok(link_status
, intel_dp
->lane_count
)) {
3701 DRM_DEBUG_KMS("clock recovery OK\n");
3706 * if we used previously trained voltage and pre-emphasis values
3707 * and we don't get clock recovery, reset link training values
3709 if (intel_dp
->train_set_valid
) {
3710 DRM_DEBUG_KMS("clock recovery not ok, reset");
3711 /* clear the flag as we are not reusing train set */
3712 intel_dp
->train_set_valid
= false;
3713 if (!intel_dp_reset_link_train(intel_dp
, &DP
,
3714 DP_TRAINING_PATTERN_1
|
3715 DP_LINK_SCRAMBLING_DISABLE
)) {
3716 DRM_ERROR("failed to enable link training\n");
3722 /* Check to see if we've tried the max voltage */
3723 for (i
= 0; i
< intel_dp
->lane_count
; i
++)
3724 if ((intel_dp
->train_set
[i
] & DP_TRAIN_MAX_SWING_REACHED
) == 0)
3726 if (i
== intel_dp
->lane_count
) {
3728 if (loop_tries
== 5) {
3729 DRM_ERROR("too many full retries, give up\n");
3732 intel_dp_reset_link_train(intel_dp
, &DP
,
3733 DP_TRAINING_PATTERN_1
|
3734 DP_LINK_SCRAMBLING_DISABLE
);
3739 /* Check to see if we've tried the same voltage 5 times */
3740 if ((intel_dp
->train_set
[0] & DP_TRAIN_VOLTAGE_SWING_MASK
) == voltage
) {
3742 if (voltage_tries
== 5) {
3743 DRM_ERROR("too many voltage retries, give up\n");
3748 voltage
= intel_dp
->train_set
[0] & DP_TRAIN_VOLTAGE_SWING_MASK
;
3750 /* Update training set as requested by target */
3751 if (!intel_dp_update_link_train(intel_dp
, &DP
, link_status
)) {
3752 DRM_ERROR("failed to update link training\n");
3761 intel_dp_complete_link_train(struct intel_dp
*intel_dp
)
3763 bool channel_eq
= false;
3764 int tries
, cr_tries
;
3765 uint32_t DP
= intel_dp
->DP
;
3766 uint32_t training_pattern
= DP_TRAINING_PATTERN_2
;
3768 /* Training Pattern 3 for HBR2 or 1.2 devices that support it*/
3769 if (intel_dp
->link_rate
== 540000 || intel_dp
->use_tps3
)
3770 training_pattern
= DP_TRAINING_PATTERN_3
;
3772 /* channel equalization */
3773 if (!intel_dp_set_link_train(intel_dp
, &DP
,
3775 DP_LINK_SCRAMBLING_DISABLE
)) {
3776 DRM_ERROR("failed to start channel equalization\n");
3784 uint8_t link_status
[DP_LINK_STATUS_SIZE
];
3787 DRM_ERROR("failed to train DP, aborting\n");
3791 drm_dp_link_train_channel_eq_delay(intel_dp
->dpcd
);
3792 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
3793 DRM_ERROR("failed to get link status\n");
3797 /* Make sure clock is still ok */
3798 if (!drm_dp_clock_recovery_ok(link_status
,
3799 intel_dp
->lane_count
)) {
3800 intel_dp
->train_set_valid
= false;
3801 intel_dp_start_link_train(intel_dp
);
3802 intel_dp_set_link_train(intel_dp
, &DP
,
3804 DP_LINK_SCRAMBLING_DISABLE
);
3809 if (drm_dp_channel_eq_ok(link_status
,
3810 intel_dp
->lane_count
)) {
3815 /* Try 5 times, then try clock recovery if that fails */
3817 intel_dp
->train_set_valid
= false;
3818 intel_dp_start_link_train(intel_dp
);
3819 intel_dp_set_link_train(intel_dp
, &DP
,
3821 DP_LINK_SCRAMBLING_DISABLE
);
3827 /* Update training set as requested by target */
3828 if (!intel_dp_update_link_train(intel_dp
, &DP
, link_status
)) {
3829 DRM_ERROR("failed to update link training\n");
3835 intel_dp_set_idle_link_train(intel_dp
);
3840 intel_dp
->train_set_valid
= true;
3841 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/*
 * intel_dp_stop_link_train - end link training by programming
 * DP_TRAINING_PATTERN_DISABLE on both the port and the sink,
 * switching the link to normal pixel output.
 * NOTE(review): corrupted extraction -- statement split across lines.
 * Code text kept byte-identical.
 */
3845 void intel_dp_stop_link_train(struct intel_dp
*intel_dp
)
3847 intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
,
3848 DP_TRAINING_PATTERN_DISABLE
);
3852 intel_dp_link_down(struct intel_dp
*intel_dp
)
3854 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3855 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
3856 enum port port
= intel_dig_port
->port
;
3857 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3858 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3859 uint32_t DP
= intel_dp
->DP
;
3861 if (WARN_ON(HAS_DDI(dev
)))
3864 if (WARN_ON((I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
) == 0))
3867 DRM_DEBUG_KMS("\n");
3869 if ((IS_GEN7(dev
) && port
== PORT_A
) ||
3870 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
3871 DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3872 DP
|= DP_LINK_TRAIN_PAT_IDLE_CPT
;
3874 if (IS_CHERRYVIEW(dev
))
3875 DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
3877 DP
&= ~DP_LINK_TRAIN_MASK
;
3878 DP
|= DP_LINK_TRAIN_PAT_IDLE
;
3880 I915_WRITE(intel_dp
->output_reg
, DP
);
3881 POSTING_READ(intel_dp
->output_reg
);
3883 DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
3884 I915_WRITE(intel_dp
->output_reg
, DP
);
3885 POSTING_READ(intel_dp
->output_reg
);
3888 * HW workaround for IBX, we need to move the port
3889 * to transcoder A after disabling it to allow the
3890 * matching HDMI port to be enabled on transcoder A.
3892 if (HAS_PCH_IBX(dev
) && crtc
->pipe
== PIPE_B
&& port
!= PORT_A
) {
3893 /* always enable with pattern 1 (as per spec) */
3894 DP
&= ~(DP_PIPEB_SELECT
| DP_LINK_TRAIN_MASK
);
3895 DP
|= DP_PORT_EN
| DP_LINK_TRAIN_PAT_1
;
3896 I915_WRITE(intel_dp
->output_reg
, DP
);
3897 POSTING_READ(intel_dp
->output_reg
);
3900 I915_WRITE(intel_dp
->output_reg
, DP
);
3901 POSTING_READ(intel_dp
->output_reg
);
3904 msleep(intel_dp
->panel_power_down_delay
);
3908 intel_dp_get_dpcd(struct intel_dp
*intel_dp
)
3910 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3911 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3912 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3915 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, 0x000, intel_dp
->dpcd
,
3916 sizeof(intel_dp
->dpcd
)) < 0)
3917 return false; /* aux transfer failed */
3919 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp
->dpcd
), intel_dp
->dpcd
);
3921 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0)
3922 return false; /* DPCD not present */
3924 /* Check if the panel supports PSR */
3925 memset(intel_dp
->psr_dpcd
, 0, sizeof(intel_dp
->psr_dpcd
));
3926 if (is_edp(intel_dp
)) {
3927 intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_PSR_SUPPORT
,
3929 sizeof(intel_dp
->psr_dpcd
));
3930 if (intel_dp
->psr_dpcd
[0] & DP_PSR_IS_SUPPORTED
) {
3931 dev_priv
->psr
.sink_support
= true;
3932 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3935 if (INTEL_INFO(dev
)->gen
>= 9 &&
3936 (intel_dp
->psr_dpcd
[0] & DP_PSR2_IS_SUPPORTED
)) {
3937 uint8_t frame_sync_cap
;
3939 dev_priv
->psr
.sink_support
= true;
3940 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3941 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP
,
3942 &frame_sync_cap
, 1);
3943 dev_priv
->psr
.aux_frame_sync
= frame_sync_cap
? true : false;
3944 /* PSR2 needs frame sync as well */
3945 dev_priv
->psr
.psr2_support
= dev_priv
->psr
.aux_frame_sync
;
3946 DRM_DEBUG_KMS("PSR2 %s on sink",
3947 dev_priv
->psr
.psr2_support
? "supported" : "not supported");
3951 /* Training Pattern 3 support, both source and sink */
3952 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x12 &&
3953 intel_dp
->dpcd
[DP_MAX_LANE_COUNT
] & DP_TPS3_SUPPORTED
&&
3954 (IS_HASWELL(dev_priv
) || INTEL_INFO(dev_priv
)->gen
>= 8)) {
3955 intel_dp
->use_tps3
= true;
3956 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3958 intel_dp
->use_tps3
= false;
3960 /* Intermediate frequency support */
3961 if (is_edp(intel_dp
) &&
3962 (intel_dp
->dpcd
[DP_EDP_CONFIGURATION_CAP
] & DP_DPCD_DISPLAY_CONTROL_CAPABLE
) &&
3963 (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_EDP_DPCD_REV
, &rev
, 1) == 1) &&
3964 (rev
>= 0x03)) { /* eDp v1.4 or higher */
3965 __le16 sink_rates
[DP_MAX_SUPPORTED_RATES
];
3968 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3969 DP_SUPPORTED_LINK_RATES
,
3971 sizeof(sink_rates
));
3973 for (i
= 0; i
< ARRAY_SIZE(sink_rates
); i
++) {
3974 int val
= le16_to_cpu(sink_rates
[i
]);
3979 /* Value read is in kHz while drm clock is saved in deca-kHz */
3980 intel_dp
->sink_rates
[i
] = (val
* 200) / 10;
3982 intel_dp
->num_sink_rates
= i
;
3985 intel_dp_print_rates(intel_dp
);
3987 if (!(intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
3988 DP_DWN_STRM_PORT_PRESENT
))
3989 return true; /* native DP sink */
3991 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0x10)
3992 return true; /* no per-port downstream info */
3994 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_DOWNSTREAM_PORT_0
,
3995 intel_dp
->downstream_ports
,
3996 DP_MAX_DOWNSTREAM_PORTS
) < 0)
3997 return false; /* downstream port status fetch failed */
4003 intel_dp_probe_oui(struct intel_dp
*intel_dp
)
4007 if (!(intel_dp
->dpcd
[DP_DOWN_STREAM_PORT_COUNT
] & DP_OUI_SUPPORT
))
4010 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_OUI
, buf
, 3) == 3)
4011 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4012 buf
[0], buf
[1], buf
[2]);
4014 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_BRANCH_OUI
, buf
, 3) == 3)
4015 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4016 buf
[0], buf
[1], buf
[2]);
4020 intel_dp_probe_mst(struct intel_dp
*intel_dp
)
4024 if (!intel_dp
->can_mst
)
4027 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x12)
4030 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_MSTM_CAP
, buf
, 1)) {
4031 if (buf
[0] & DP_MST_CAP
) {
4032 DRM_DEBUG_KMS("Sink is MST capable\n");
4033 intel_dp
->is_mst
= true;
4035 DRM_DEBUG_KMS("Sink is not MST capable\n");
4036 intel_dp
->is_mst
= false;
4040 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4041 return intel_dp
->is_mst
;
4044 static int intel_dp_sink_crc_stop(struct intel_dp
*intel_dp
)
4046 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4047 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4051 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0) {
4052 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4057 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4058 buf
& ~DP_TEST_SINK_START
) < 0) {
4059 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4064 intel_dp
->sink_crc
.started
= false;
4066 hsw_enable_ips(intel_crtc
);
4070 static int intel_dp_sink_crc_start(struct intel_dp
*intel_dp
)
4072 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4073 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4077 if (intel_dp
->sink_crc
.started
) {
4078 ret
= intel_dp_sink_crc_stop(intel_dp
);
4083 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0)
4086 if (!(buf
& DP_TEST_CRC_SUPPORTED
))
4089 intel_dp
->sink_crc
.last_count
= buf
& DP_TEST_COUNT_MASK
;
4091 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0)
4094 hsw_disable_ips(intel_crtc
);
4096 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4097 buf
| DP_TEST_SINK_START
) < 0) {
4098 hsw_enable_ips(intel_crtc
);
4102 intel_dp
->sink_crc
.started
= true;
4106 int intel_dp_sink_crc(struct intel_dp
*intel_dp
, u8
*crc
)
4108 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4109 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
4110 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4116 ret
= intel_dp_sink_crc_start(intel_dp
);
4121 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4123 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
4124 DP_TEST_SINK_MISC
, &buf
) < 0) {
4128 count
= buf
& DP_TEST_COUNT_MASK
;
4131 * Count might be reset during the loop. In this case
4132 * last known count needs to be reset as well.
4135 intel_dp
->sink_crc
.last_count
= 0;
4137 if (drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_CRC_R_CR
, crc
, 6) < 0) {
4142 old_equal_new
= (count
== intel_dp
->sink_crc
.last_count
&&
4143 !memcmp(intel_dp
->sink_crc
.last_crc
, crc
,
4146 } while (--attempts
&& (count
== 0 || old_equal_new
));
4148 intel_dp
->sink_crc
.last_count
= buf
& DP_TEST_COUNT_MASK
;
4149 memcpy(intel_dp
->sink_crc
.last_crc
, crc
, 6 * sizeof(u8
));
4151 if (attempts
== 0) {
4152 if (old_equal_new
) {
4153 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4155 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4162 intel_dp_sink_crc_stop(intel_dp
);
/*
 * intel_dp_get_sink_irq - read the sink's DEVICE_SERVICE_IRQ_VECTOR
 * DPCD register into *sink_irq_vector.
 * Returns true when exactly the one requested byte was read.
 * NOTE(review): corrupted extraction -- statement split across lines.
 * Code text kept byte-identical.
 */
4167 intel_dp_get_sink_irq(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4169 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4170 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4171 sink_irq_vector
, 1) == 1;
4175 intel_dp_get_sink_irq_esi(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4179 ret
= intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4181 sink_irq_vector
, 14);
4188 static uint8_t intel_dp_autotest_link_training(struct intel_dp
*intel_dp
)
4190 uint8_t test_result
= DP_TEST_ACK
;
4194 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp
*intel_dp
)
4196 uint8_t test_result
= DP_TEST_NAK
;
4200 static uint8_t intel_dp_autotest_edid(struct intel_dp
*intel_dp
)
4202 uint8_t test_result
= DP_TEST_NAK
;
4203 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4204 struct drm_connector
*connector
= &intel_connector
->base
;
4206 if (intel_connector
->detect_edid
== NULL
||
4207 connector
->edid_corrupt
||
4208 intel_dp
->aux
.i2c_defer_count
> 6) {
4209 /* Check EDID read for NACKs, DEFERs and corruption
4210 * (DP CTS 1.2 Core r1.1)
4211 * 4.2.2.4 : Failed EDID read, I2C_NAK
4212 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4213 * 4.2.2.6 : EDID corruption detected
4214 * Use failsafe mode for all cases
4216 if (intel_dp
->aux
.i2c_nack_count
> 0 ||
4217 intel_dp
->aux
.i2c_defer_count
> 0)
4218 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4219 intel_dp
->aux
.i2c_nack_count
,
4220 intel_dp
->aux
.i2c_defer_count
);
4221 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_FAILSAFE
;
4223 struct edid
*block
= intel_connector
->detect_edid
;
4225 /* We have to write the checksum
4226 * of the last block read
4228 block
+= intel_connector
->detect_edid
->extensions
;
4230 if (!drm_dp_dpcd_write(&intel_dp
->aux
,
4231 DP_TEST_EDID_CHECKSUM
,
4234 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4236 test_result
= DP_TEST_ACK
| DP_TEST_EDID_CHECKSUM_WRITE
;
4237 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_STANDARD
;
4240 /* Set test active flag here so userspace doesn't interrupt things */
4241 intel_dp
->compliance_test_active
= 1;
4246 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp
*intel_dp
)
4248 uint8_t test_result
= DP_TEST_NAK
;
4252 static void intel_dp_handle_test_request(struct intel_dp
*intel_dp
)
4254 uint8_t response
= DP_TEST_NAK
;
4258 intel_dp
->compliance_test_active
= 0;
4259 intel_dp
->compliance_test_type
= 0;
4260 intel_dp
->compliance_test_data
= 0;
4262 intel_dp
->aux
.i2c_nack_count
= 0;
4263 intel_dp
->aux
.i2c_defer_count
= 0;
4265 status
= drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_REQUEST
, &rxdata
, 1);
4267 DRM_DEBUG_KMS("Could not read test request from sink\n");
4272 case DP_TEST_LINK_TRAINING
:
4273 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4274 intel_dp
->compliance_test_type
= DP_TEST_LINK_TRAINING
;
4275 response
= intel_dp_autotest_link_training(intel_dp
);
4277 case DP_TEST_LINK_VIDEO_PATTERN
:
4278 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4279 intel_dp
->compliance_test_type
= DP_TEST_LINK_VIDEO_PATTERN
;
4280 response
= intel_dp_autotest_video_pattern(intel_dp
);
4282 case DP_TEST_LINK_EDID_READ
:
4283 DRM_DEBUG_KMS("EDID test requested\n");
4284 intel_dp
->compliance_test_type
= DP_TEST_LINK_EDID_READ
;
4285 response
= intel_dp_autotest_edid(intel_dp
);
4287 case DP_TEST_LINK_PHY_TEST_PATTERN
:
4288 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4289 intel_dp
->compliance_test_type
= DP_TEST_LINK_PHY_TEST_PATTERN
;
4290 response
= intel_dp_autotest_phy_pattern(intel_dp
);
4293 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata
);
4298 status
= drm_dp_dpcd_write(&intel_dp
->aux
,
4302 DRM_DEBUG_KMS("Could not write test response to sink\n");
4306 intel_dp_check_mst_status(struct intel_dp
*intel_dp
)
4310 if (intel_dp
->is_mst
) {
4315 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4319 /* check link status - esi[10] = 0x200c */
4320 if (intel_dp
->active_mst_links
&&
4321 !drm_dp_channel_eq_ok(&esi
[10], intel_dp
->lane_count
)) {
4322 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4323 intel_dp_start_link_train(intel_dp
);
4324 intel_dp_complete_link_train(intel_dp
);
4325 intel_dp_stop_link_train(intel_dp
);
4328 DRM_DEBUG_KMS("got esi %3ph\n", esi
);
4329 ret
= drm_dp_mst_hpd_irq(&intel_dp
->mst_mgr
, esi
, &handled
);
4332 for (retry
= 0; retry
< 3; retry
++) {
4334 wret
= drm_dp_dpcd_write(&intel_dp
->aux
,
4335 DP_SINK_COUNT_ESI
+1,
4342 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4344 DRM_DEBUG_KMS("got esi2 %3ph\n", esi
);
4352 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4353 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4354 intel_dp
->is_mst
= false;
4355 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4356 /* send a hotplug event */
4357 drm_kms_helper_hotplug_event(intel_dig_port
->base
.base
.dev
);
4364 * According to DP spec
4367 * 2. Configure link according to Receiver Capabilities
4368 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4369 * 4. Check link status on receipt of hot-plug interrupt
4372 intel_dp_check_link_status(struct intel_dp
*intel_dp
)
4374 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4375 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4377 u8 link_status
[DP_LINK_STATUS_SIZE
];
4379 WARN_ON(!drm_modeset_is_locked(&dev
->mode_config
.connection_mutex
));
4381 if (!intel_encoder
->base
.crtc
)
4384 if (!to_intel_crtc(intel_encoder
->base
.crtc
)->active
)
4387 /* Try to read receiver status if the link appears to be up */
4388 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
4392 /* Now read the DPCD to see if it's actually running */
4393 if (!intel_dp_get_dpcd(intel_dp
)) {
4397 /* Try to read the source of the interrupt */
4398 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4399 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4400 /* Clear interrupt source */
4401 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4402 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4405 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4406 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4407 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4408 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4411 if (!drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
)) {
4412 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4413 intel_encoder
->base
.name
);
4414 intel_dp_start_link_train(intel_dp
);
4415 intel_dp_complete_link_train(intel_dp
);
4416 intel_dp_stop_link_train(intel_dp
);
4420 /* XXX this is probably wrong for multiple downstream ports */
4421 static enum drm_connector_status
4422 intel_dp_detect_dpcd(struct intel_dp
*intel_dp
)
4424 uint8_t *dpcd
= intel_dp
->dpcd
;
4427 if (!intel_dp_get_dpcd(intel_dp
))
4428 return connector_status_disconnected
;
4430 /* if there's no downstream port, we're done */
4431 if (!(dpcd
[DP_DOWNSTREAMPORT_PRESENT
] & DP_DWN_STRM_PORT_PRESENT
))
4432 return connector_status_connected
;
4434 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4435 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4436 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
) {
4439 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_COUNT
,
4441 return connector_status_unknown
;
4443 return DP_GET_SINK_COUNT(reg
) ? connector_status_connected
4444 : connector_status_disconnected
;
4447 /* If no HPD, poke DDC gently */
4448 if (drm_probe_ddc(&intel_dp
->aux
.ddc
))
4449 return connector_status_connected
;
4451 /* Well we tried, say unknown for unreliable port types */
4452 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11) {
4453 type
= intel_dp
->downstream_ports
[0] & DP_DS_PORT_TYPE_MASK
;
4454 if (type
== DP_DS_PORT_TYPE_VGA
||
4455 type
== DP_DS_PORT_TYPE_NON_EDID
)
4456 return connector_status_unknown
;
4458 type
= intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
4459 DP_DWN_STRM_PORT_TYPE_MASK
;
4460 if (type
== DP_DWN_STRM_PORT_TYPE_ANALOG
||
4461 type
== DP_DWN_STRM_PORT_TYPE_OTHER
)
4462 return connector_status_unknown
;
4465 /* Anything else is out of spec, warn and ignore */
4466 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4467 return connector_status_disconnected
;
4470 static enum drm_connector_status
4471 edp_detect(struct intel_dp
*intel_dp
)
4473 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4474 enum drm_connector_status status
;
4476 status
= intel_panel_detect(dev
);
4477 if (status
== connector_status_unknown
)
4478 status
= connector_status_connected
;
4483 static bool ibx_digital_port_connected(struct drm_i915_private
*dev_priv
,
4484 struct intel_digital_port
*port
)
4488 switch (port
->port
) {
4492 bit
= SDE_PORTB_HOTPLUG
;
4495 bit
= SDE_PORTC_HOTPLUG
;
4498 bit
= SDE_PORTD_HOTPLUG
;
4501 MISSING_CASE(port
->port
);
4505 return I915_READ(SDEISR
) & bit
;
4508 static bool cpt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4509 struct intel_digital_port
*port
)
4513 switch (port
->port
) {
4517 bit
= SDE_PORTB_HOTPLUG_CPT
;
4520 bit
= SDE_PORTC_HOTPLUG_CPT
;
4523 bit
= SDE_PORTD_HOTPLUG_CPT
;
4526 MISSING_CASE(port
->port
);
4530 return I915_READ(SDEISR
) & bit
;
4533 static bool g4x_digital_port_connected(struct drm_i915_private
*dev_priv
,
4534 struct intel_digital_port
*port
)
4538 switch (port
->port
) {
4540 bit
= PORTB_HOTPLUG_LIVE_STATUS_G4X
;
4543 bit
= PORTC_HOTPLUG_LIVE_STATUS_G4X
;
4546 bit
= PORTD_HOTPLUG_LIVE_STATUS_G4X
;
4549 MISSING_CASE(port
->port
);
4553 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4556 static bool vlv_digital_port_connected(struct drm_i915_private
*dev_priv
,
4557 struct intel_digital_port
*port
)
4561 switch (port
->port
) {
4563 bit
= PORTB_HOTPLUG_LIVE_STATUS_VLV
;
4566 bit
= PORTC_HOTPLUG_LIVE_STATUS_VLV
;
4569 bit
= PORTD_HOTPLUG_LIVE_STATUS_VLV
;
4572 MISSING_CASE(port
->port
);
4576 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4579 static bool bxt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4580 struct intel_digital_port
*port
)
4584 switch (port
->port
) {
4586 bit
= BXT_DE_PORT_HP_DDIA
;
4589 bit
= BXT_DE_PORT_HP_DDIB
;
4592 bit
= BXT_DE_PORT_HP_DDIC
;
4595 MISSING_CASE(port
->port
);
4599 return I915_READ(GEN8_DE_PORT_ISR
) & bit
;
4603 * intel_digital_port_connected - is the specified port connected?
4604 * @dev_priv: i915 private structure
4605 * @port: the port to test
4607 * Return %true if @port is connected, %false otherwise.
/*
 * intel_digital_port_connected - platform dispatch for live HPD status.
 * Routes to the IBX / CPT / BXT / VLV / G4X helper based on the PCH or
 * platform type; g4x is the final fallback.
 * NOTE(review): corrupted extraction -- statements split across lines,
 * braces dropped. Code text kept byte-identical.
 */
4609 static bool intel_digital_port_connected(struct drm_i915_private
*dev_priv
,
4610 struct intel_digital_port
*port
)
4612 if (HAS_PCH_IBX(dev_priv
))
4613 return ibx_digital_port_connected(dev_priv
, port
);
4614 if (HAS_PCH_SPLIT(dev_priv
))
4615 return cpt_digital_port_connected(dev_priv
, port
);
4616 else if (IS_BROXTON(dev_priv
))
4617 return bxt_digital_port_connected(dev_priv
, port
);
4618 else if (IS_VALLEYVIEW(dev_priv
))
4619 return vlv_digital_port_connected(dev_priv
, port
);
/* Default: pre-PCH (g4x-class) hotplug status register. */
4621 return g4x_digital_port_connected(dev_priv
, port
);
/*
 * ironlake_dp_detect - connector detect for PCH platforms.
 * First checks the live HPD bit; only if the port reports connected
 * does it probe the sink's DPCD for the final status.
 * NOTE(review): corrupted extraction -- statements split across lines,
 * braces dropped. Code text kept byte-identical.
 */
4624 static enum drm_connector_status
4625 ironlake_dp_detect(struct intel_dp
*intel_dp
)
4627 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4628 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4629 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
/* No live HPD -> definitely disconnected; skip the AUX probe. */
4631 if (!intel_digital_port_connected(dev_priv
, intel_dig_port
))
4632 return connector_status_disconnected
;
4634 return intel_dp_detect_dpcd(intel_dp
);
4637 static enum drm_connector_status
4638 g4x_dp_detect(struct intel_dp
*intel_dp
)
4640 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4641 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4643 /* Can't disconnect eDP, but you can close the lid... */
4644 if (is_edp(intel_dp
)) {
4645 enum drm_connector_status status
;
4647 status
= intel_panel_detect(dev
);
4648 if (status
== connector_status_unknown
)
4649 status
= connector_status_connected
;
4653 if (!intel_digital_port_connected(dev
->dev_private
, intel_dig_port
))
4654 return connector_status_disconnected
;
4656 return intel_dp_detect_dpcd(intel_dp
);
4659 static struct edid
*
4660 intel_dp_get_edid(struct intel_dp
*intel_dp
)
4662 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4664 /* use cached edid if we have one */
4665 if (intel_connector
->edid
) {
4667 if (IS_ERR(intel_connector
->edid
))
4670 return drm_edid_duplicate(intel_connector
->edid
);
4672 return drm_get_edid(&intel_connector
->base
,
4673 &intel_dp
->aux
.ddc
);
4677 intel_dp_set_edid(struct intel_dp
*intel_dp
)
4679 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4682 edid
= intel_dp_get_edid(intel_dp
);
4683 intel_connector
->detect_edid
= edid
;
4685 if (intel_dp
->force_audio
!= HDMI_AUDIO_AUTO
)
4686 intel_dp
->has_audio
= intel_dp
->force_audio
== HDMI_AUDIO_ON
;
4688 intel_dp
->has_audio
= drm_detect_monitor_audio(edid
);
4692 intel_dp_unset_edid(struct intel_dp
*intel_dp
)
4694 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4696 kfree(intel_connector
->detect_edid
);
4697 intel_connector
->detect_edid
= NULL
;
4699 intel_dp
->has_audio
= false;
4702 static enum intel_display_power_domain
4703 intel_dp_power_get(struct intel_dp
*dp
)
4705 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4706 enum intel_display_power_domain power_domain
;
4708 power_domain
= intel_display_port_power_domain(encoder
);
4709 intel_display_power_get(to_i915(encoder
->base
.dev
), power_domain
);
4711 return power_domain
;
4715 intel_dp_power_put(struct intel_dp
*dp
,
4716 enum intel_display_power_domain power_domain
)
4718 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4719 intel_display_power_put(to_i915(encoder
->base
.dev
), power_domain
);
4722 static enum drm_connector_status
4723 intel_dp_detect(struct drm_connector
*connector
, bool force
)
4725 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4726 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4727 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4728 struct drm_device
*dev
= connector
->dev
;
4729 enum drm_connector_status status
;
4730 enum intel_display_power_domain power_domain
;
4734 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4735 connector
->base
.id
, connector
->name
);
4736 intel_dp_unset_edid(intel_dp
);
4738 if (intel_dp
->is_mst
) {
4739 /* MST devices are disconnected from a monitor POV */
4740 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4741 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4742 return connector_status_disconnected
;
4745 power_domain
= intel_dp_power_get(intel_dp
);
4747 /* Can't disconnect eDP, but you can close the lid... */
4748 if (is_edp(intel_dp
))
4749 status
= edp_detect(intel_dp
);
4750 else if (HAS_PCH_SPLIT(dev
))
4751 status
= ironlake_dp_detect(intel_dp
);
4753 status
= g4x_dp_detect(intel_dp
);
4754 if (status
!= connector_status_connected
)
4757 intel_dp_probe_oui(intel_dp
);
4759 ret
= intel_dp_probe_mst(intel_dp
);
4761 /* if we are in MST mode then this connector
4762 won't appear connected or have anything with EDID on it */
4763 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4764 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4765 status
= connector_status_disconnected
;
4769 intel_dp_set_edid(intel_dp
);
4771 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4772 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4773 status
= connector_status_connected
;
4775 /* Try to read the source of the interrupt */
4776 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4777 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4778 /* Clear interrupt source */
4779 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4780 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4783 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4784 intel_dp_handle_test_request(intel_dp
);
4785 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4786 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4790 intel_dp_power_put(intel_dp
, power_domain
);
4795 intel_dp_force(struct drm_connector
*connector
)
4797 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4798 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4799 enum intel_display_power_domain power_domain
;
4801 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4802 connector
->base
.id
, connector
->name
);
4803 intel_dp_unset_edid(intel_dp
);
4805 if (connector
->status
!= connector_status_connected
)
4808 power_domain
= intel_dp_power_get(intel_dp
);
4810 intel_dp_set_edid(intel_dp
);
4812 intel_dp_power_put(intel_dp
, power_domain
);
4814 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4815 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4818 static int intel_dp_get_modes(struct drm_connector
*connector
)
4820 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4823 edid
= intel_connector
->detect_edid
;
4825 int ret
= intel_connector_update_modes(connector
, edid
);
4830 /* if eDP has no EDID, fall back to fixed mode */
4831 if (is_edp(intel_attached_dp(connector
)) &&
4832 intel_connector
->panel
.fixed_mode
) {
4833 struct drm_display_mode
*mode
;
4835 mode
= drm_mode_duplicate(connector
->dev
,
4836 intel_connector
->panel
.fixed_mode
);
4838 drm_mode_probed_add(connector
, mode
);
4847 intel_dp_detect_audio(struct drm_connector
*connector
)
4849 bool has_audio
= false;
4852 edid
= to_intel_connector(connector
)->detect_edid
;
4854 has_audio
= drm_detect_monitor_audio(edid
);
4860 intel_dp_set_property(struct drm_connector
*connector
,
4861 struct drm_property
*property
,
4864 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
4865 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4866 struct intel_encoder
*intel_encoder
= intel_attached_encoder(connector
);
4867 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4870 ret
= drm_object_property_set_value(&connector
->base
, property
, val
);
4874 if (property
== dev_priv
->force_audio_property
) {
4878 if (i
== intel_dp
->force_audio
)
4881 intel_dp
->force_audio
= i
;
4883 if (i
== HDMI_AUDIO_AUTO
)
4884 has_audio
= intel_dp_detect_audio(connector
);
4886 has_audio
= (i
== HDMI_AUDIO_ON
);
4888 if (has_audio
== intel_dp
->has_audio
)
4891 intel_dp
->has_audio
= has_audio
;
4895 if (property
== dev_priv
->broadcast_rgb_property
) {
4896 bool old_auto
= intel_dp
->color_range_auto
;
4897 bool old_range
= intel_dp
->limited_color_range
;
4900 case INTEL_BROADCAST_RGB_AUTO
:
4901 intel_dp
->color_range_auto
= true;
4903 case INTEL_BROADCAST_RGB_FULL
:
4904 intel_dp
->color_range_auto
= false;
4905 intel_dp
->limited_color_range
= false;
4907 case INTEL_BROADCAST_RGB_LIMITED
:
4908 intel_dp
->color_range_auto
= false;
4909 intel_dp
->limited_color_range
= true;
4915 if (old_auto
== intel_dp
->color_range_auto
&&
4916 old_range
== intel_dp
->limited_color_range
)
4922 if (is_edp(intel_dp
) &&
4923 property
== connector
->dev
->mode_config
.scaling_mode_property
) {
4924 if (val
== DRM_MODE_SCALE_NONE
) {
4925 DRM_DEBUG_KMS("no scaling not supported\n");
4929 if (intel_connector
->panel
.fitting_mode
== val
) {
4930 /* the eDP scaling property is not changed */
4933 intel_connector
->panel
.fitting_mode
= val
;
4941 if (intel_encoder
->base
.crtc
)
4942 intel_crtc_restore_mode(intel_encoder
->base
.crtc
);
4948 intel_dp_connector_destroy(struct drm_connector
*connector
)
4950 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4952 kfree(intel_connector
->detect_edid
);
4954 if (!IS_ERR_OR_NULL(intel_connector
->edid
))
4955 kfree(intel_connector
->edid
);
4957 /* Can't call is_edp() since the encoder may have been destroyed
4959 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4960 intel_panel_fini(&intel_connector
->panel
);
4962 drm_connector_cleanup(connector
);
4966 void intel_dp_encoder_destroy(struct drm_encoder
*encoder
)
4968 struct intel_digital_port
*intel_dig_port
= enc_to_dig_port(encoder
);
4969 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4971 drm_dp_aux_unregister(&intel_dp
->aux
);
4972 intel_dp_mst_encoder_cleanup(intel_dig_port
);
4973 if (is_edp(intel_dp
)) {
4974 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4976 * vdd might still be enabled do to the delayed vdd off.
4977 * Make sure vdd is actually turned off here.
4980 edp_panel_vdd_off_sync(intel_dp
);
4981 pps_unlock(intel_dp
);
4983 if (intel_dp
->edp_notifier
.notifier_call
) {
4984 unregister_reboot_notifier(&intel_dp
->edp_notifier
);
4985 intel_dp
->edp_notifier
.notifier_call
= NULL
;
4988 drm_encoder_cleanup(encoder
);
4989 kfree(intel_dig_port
);
4992 static void intel_dp_encoder_suspend(struct intel_encoder
*intel_encoder
)
4994 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4996 if (!is_edp(intel_dp
))
5000 * vdd might still be enabled do to the delayed vdd off.
5001 * Make sure vdd is actually turned off here.
5003 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
5005 edp_panel_vdd_off_sync(intel_dp
);
5006 pps_unlock(intel_dp
);
5009 static void intel_edp_panel_vdd_sanitize(struct intel_dp
*intel_dp
)
5011 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
5012 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
5013 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5014 enum intel_display_power_domain power_domain
;
5016 lockdep_assert_held(&dev_priv
->pps_mutex
);
5018 if (!edp_have_panel_vdd(intel_dp
))
5022 * The VDD bit needs a power domain reference, so if the bit is
5023 * already enabled when we boot or resume, grab this reference and
5024 * schedule a vdd off, so we don't hold on to the reference
5027 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5028 power_domain
= intel_display_port_power_domain(&intel_dig_port
->base
);
5029 intel_display_power_get(dev_priv
, power_domain
);
5031 edp_panel_vdd_schedule_off(intel_dp
);
5034 static void intel_dp_encoder_reset(struct drm_encoder
*encoder
)
5036 struct intel_dp
*intel_dp
;
5038 if (to_intel_encoder(encoder
)->type
!= INTEL_OUTPUT_EDP
)
5041 intel_dp
= enc_to_intel_dp(encoder
);
5046 * Read out the current power sequencer assignment,
5047 * in case the BIOS did something with it.
5049 if (IS_VALLEYVIEW(encoder
->dev
))
5050 vlv_initial_power_sequencer_setup(intel_dp
);
5052 intel_edp_panel_vdd_sanitize(intel_dp
);
5054 pps_unlock(intel_dp
);
5057 static const struct drm_connector_funcs intel_dp_connector_funcs
= {
5058 .dpms
= drm_atomic_helper_connector_dpms
,
5059 .detect
= intel_dp_detect
,
5060 .force
= intel_dp_force
,
5061 .fill_modes
= drm_helper_probe_single_connector_modes
,
5062 .set_property
= intel_dp_set_property
,
5063 .atomic_get_property
= intel_connector_atomic_get_property
,
5064 .destroy
= intel_dp_connector_destroy
,
5065 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
5066 .atomic_duplicate_state
= drm_atomic_helper_connector_duplicate_state
,
5069 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs
= {
5070 .get_modes
= intel_dp_get_modes
,
5071 .mode_valid
= intel_dp_mode_valid
,
5072 .best_encoder
= intel_best_encoder
,
5075 static const struct drm_encoder_funcs intel_dp_enc_funcs
= {
5076 .reset
= intel_dp_encoder_reset
,
5077 .destroy
= intel_dp_encoder_destroy
,
5081 intel_dp_hpd_pulse(struct intel_digital_port
*intel_dig_port
, bool long_hpd
)
5083 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5084 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5085 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
5086 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5087 enum intel_display_power_domain power_domain
;
5088 enum irqreturn ret
= IRQ_NONE
;
5090 if (intel_dig_port
->base
.type
!= INTEL_OUTPUT_EDP
)
5091 intel_dig_port
->base
.type
= INTEL_OUTPUT_DISPLAYPORT
;
5093 if (long_hpd
&& intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
) {
5095 * vdd off can generate a long pulse on eDP which
5096 * would require vdd on to handle it, and thus we
5097 * would end up in an endless cycle of
5098 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5100 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5101 port_name(intel_dig_port
->port
));
5105 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5106 port_name(intel_dig_port
->port
),
5107 long_hpd
? "long" : "short");
5109 power_domain
= intel_display_port_power_domain(intel_encoder
);
5110 intel_display_power_get(dev_priv
, power_domain
);
5113 /* indicate that we need to restart link training */
5114 intel_dp
->train_set_valid
= false;
5116 if (!intel_digital_port_connected(dev_priv
, intel_dig_port
))
5119 if (!intel_dp_get_dpcd(intel_dp
)) {
5123 intel_dp_probe_oui(intel_dp
);
5125 if (!intel_dp_probe_mst(intel_dp
))
5129 if (intel_dp
->is_mst
) {
5130 if (intel_dp_check_mst_status(intel_dp
) == -EINVAL
)
5134 if (!intel_dp
->is_mst
) {
5136 * we'll check the link status via the normal hot plug path later -
5137 * but for short hpds we should check it now
5139 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
5140 intel_dp_check_link_status(intel_dp
);
5141 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
5149 /* if we were in MST mode, and device is not there get out of MST mode */
5150 if (intel_dp
->is_mst
) {
5151 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp
->is_mst
, intel_dp
->mst_mgr
.mst_state
);
5152 intel_dp
->is_mst
= false;
5153 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
5156 intel_display_power_put(dev_priv
, power_domain
);
5161 /* Return which DP Port should be selected for Transcoder DP control */
5163 intel_trans_dp_port_sel(struct drm_crtc
*crtc
)
5165 struct drm_device
*dev
= crtc
->dev
;
5166 struct intel_encoder
*intel_encoder
;
5167 struct intel_dp
*intel_dp
;
5169 for_each_encoder_on_crtc(dev
, crtc
, intel_encoder
) {
5170 intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
5172 if (intel_encoder
->type
== INTEL_OUTPUT_DISPLAYPORT
||
5173 intel_encoder
->type
== INTEL_OUTPUT_EDP
)
5174 return intel_dp
->output_reg
;
5180 /* check the VBT to see whether the eDP is on DP-D port */
5181 bool intel_dp_is_edp(struct drm_device
*dev
, enum port port
)
5183 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5184 union child_device_config
*p_child
;
5186 static const short port_mapping
[] = {
5187 [PORT_B
] = PORT_IDPB
,
5188 [PORT_C
] = PORT_IDPC
,
5189 [PORT_D
] = PORT_IDPD
,
5195 if (!dev_priv
->vbt
.child_dev_num
)
5198 for (i
= 0; i
< dev_priv
->vbt
.child_dev_num
; i
++) {
5199 p_child
= dev_priv
->vbt
.child_dev
+ i
;
5201 if (p_child
->common
.dvo_port
== port_mapping
[port
] &&
5202 (p_child
->common
.device_type
& DEVICE_TYPE_eDP_BITS
) ==
5203 (DEVICE_TYPE_eDP
& DEVICE_TYPE_eDP_BITS
))
5210 intel_dp_add_properties(struct intel_dp
*intel_dp
, struct drm_connector
*connector
)
5212 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
5214 intel_attach_force_audio_property(connector
);
5215 intel_attach_broadcast_rgb_property(connector
);
5216 intel_dp
->color_range_auto
= true;
5218 if (is_edp(intel_dp
)) {
5219 drm_mode_create_scaling_mode_property(connector
->dev
);
5220 drm_object_attach_property(
5222 connector
->dev
->mode_config
.scaling_mode_property
,
5223 DRM_MODE_SCALE_ASPECT
);
5224 intel_connector
->panel
.fitting_mode
= DRM_MODE_SCALE_ASPECT
;
5228 static void intel_dp_init_panel_power_timestamps(struct intel_dp
*intel_dp
)
5230 intel_dp
->last_power_cycle
= jiffies
;
5231 intel_dp
->last_power_on
= jiffies
;
5232 intel_dp
->last_backlight_off
= jiffies
;
5236 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
5237 struct intel_dp
*intel_dp
)
5239 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5240 struct edp_power_seq cur
, vbt
, spec
,
5241 *final
= &intel_dp
->pps_delays
;
5242 u32 pp_on
, pp_off
, pp_div
= 0, pp_ctl
= 0;
5243 int pp_ctrl_reg
, pp_on_reg
, pp_off_reg
, pp_div_reg
= 0;
5245 lockdep_assert_held(&dev_priv
->pps_mutex
);
5247 /* already initialized? */
5248 if (final
->t11_t12
!= 0)
5251 if (IS_BROXTON(dev
)) {
5253 * TODO: BXT has 2 sets of PPS registers.
5254 * Correct Register for Broxton need to be identified
5255 * using VBT. hardcoding for now
5257 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5258 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5259 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5260 } else if (HAS_PCH_SPLIT(dev
)) {
5261 pp_ctrl_reg
= PCH_PP_CONTROL
;
5262 pp_on_reg
= PCH_PP_ON_DELAYS
;
5263 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5264 pp_div_reg
= PCH_PP_DIVISOR
;
5266 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5268 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
5269 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5270 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5271 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5274 /* Workaround: Need to write PP_CONTROL with the unlock key as
5275 * the very first thing. */
5276 pp_ctl
= ironlake_get_pp_control(intel_dp
);
5278 pp_on
= I915_READ(pp_on_reg
);
5279 pp_off
= I915_READ(pp_off_reg
);
5280 if (!IS_BROXTON(dev
)) {
5281 I915_WRITE(pp_ctrl_reg
, pp_ctl
);
5282 pp_div
= I915_READ(pp_div_reg
);
5285 /* Pull timing values out of registers */
5286 cur
.t1_t3
= (pp_on
& PANEL_POWER_UP_DELAY_MASK
) >>
5287 PANEL_POWER_UP_DELAY_SHIFT
;
5289 cur
.t8
= (pp_on
& PANEL_LIGHT_ON_DELAY_MASK
) >>
5290 PANEL_LIGHT_ON_DELAY_SHIFT
;
5292 cur
.t9
= (pp_off
& PANEL_LIGHT_OFF_DELAY_MASK
) >>
5293 PANEL_LIGHT_OFF_DELAY_SHIFT
;
5295 cur
.t10
= (pp_off
& PANEL_POWER_DOWN_DELAY_MASK
) >>
5296 PANEL_POWER_DOWN_DELAY_SHIFT
;
5298 if (IS_BROXTON(dev
)) {
5299 u16 tmp
= (pp_ctl
& BXT_POWER_CYCLE_DELAY_MASK
) >>
5300 BXT_POWER_CYCLE_DELAY_SHIFT
;
5302 cur
.t11_t12
= (tmp
- 1) * 1000;
5306 cur
.t11_t12
= ((pp_div
& PANEL_POWER_CYCLE_DELAY_MASK
) >>
5307 PANEL_POWER_CYCLE_DELAY_SHIFT
) * 1000;
5310 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5311 cur
.t1_t3
, cur
.t8
, cur
.t9
, cur
.t10
, cur
.t11_t12
);
5313 vbt
= dev_priv
->vbt
.edp_pps
;
5315 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5316 * our hw here, which are all in 100usec. */
5317 spec
.t1_t3
= 210 * 10;
5318 spec
.t8
= 50 * 10; /* no limit for t8, use t7 instead */
5319 spec
.t9
= 50 * 10; /* no limit for t9, make it symmetric with t8 */
5320 spec
.t10
= 500 * 10;
5321 /* This one is special and actually in units of 100ms, but zero
5322 * based in the hw (so we need to add 100 ms). But the sw vbt
5323 * table multiplies it with 1000 to make it in units of 100usec,
5325 spec
.t11_t12
= (510 + 100) * 10;
5327 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5328 vbt
.t1_t3
, vbt
.t8
, vbt
.t9
, vbt
.t10
, vbt
.t11_t12
);
5330 /* Use the max of the register settings and vbt. If both are
5331 * unset, fall back to the spec limits. */
5332 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5334 max(cur.field, vbt.field))
5335 assign_final(t1_t3
);
5339 assign_final(t11_t12
);
5342 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5343 intel_dp
->panel_power_up_delay
= get_delay(t1_t3
);
5344 intel_dp
->backlight_on_delay
= get_delay(t8
);
5345 intel_dp
->backlight_off_delay
= get_delay(t9
);
5346 intel_dp
->panel_power_down_delay
= get_delay(t10
);
5347 intel_dp
->panel_power_cycle_delay
= get_delay(t11_t12
);
5350 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5351 intel_dp
->panel_power_up_delay
, intel_dp
->panel_power_down_delay
,
5352 intel_dp
->panel_power_cycle_delay
);
5354 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5355 intel_dp
->backlight_on_delay
, intel_dp
->backlight_off_delay
);
5359 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
5360 struct intel_dp
*intel_dp
)
5362 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5363 u32 pp_on
, pp_off
, pp_div
, port_sel
= 0;
5364 int div
= HAS_PCH_SPLIT(dev
) ? intel_pch_rawclk(dev
) : intel_hrawclk(dev
);
5365 int pp_on_reg
, pp_off_reg
, pp_div_reg
= 0, pp_ctrl_reg
;
5366 enum port port
= dp_to_dig_port(intel_dp
)->port
;
5367 const struct edp_power_seq
*seq
= &intel_dp
->pps_delays
;
5369 lockdep_assert_held(&dev_priv
->pps_mutex
);
5371 if (IS_BROXTON(dev
)) {
5373 * TODO: BXT has 2 sets of PPS registers.
5374 * Correct Register for Broxton need to be identified
5375 * using VBT. hardcoding for now
5377 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5378 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5379 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5381 } else if (HAS_PCH_SPLIT(dev
)) {
5382 pp_on_reg
= PCH_PP_ON_DELAYS
;
5383 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5384 pp_div_reg
= PCH_PP_DIVISOR
;
5386 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5388 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5389 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5390 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5394 * And finally store the new values in the power sequencer. The
5395 * backlight delays are set to 1 because we do manual waits on them. For
5396 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5397 * we'll end up waiting for the backlight off delay twice: once when we
5398 * do the manual sleep, and once when we disable the panel and wait for
5399 * the PP_STATUS bit to become zero.
5401 pp_on
= (seq
->t1_t3
<< PANEL_POWER_UP_DELAY_SHIFT
) |
5402 (1 << PANEL_LIGHT_ON_DELAY_SHIFT
);
5403 pp_off
= (1 << PANEL_LIGHT_OFF_DELAY_SHIFT
) |
5404 (seq
->t10
<< PANEL_POWER_DOWN_DELAY_SHIFT
);
5405 /* Compute the divisor for the pp clock, simply match the Bspec
5407 if (IS_BROXTON(dev
)) {
5408 pp_div
= I915_READ(pp_ctrl_reg
);
5409 pp_div
&= ~BXT_POWER_CYCLE_DELAY_MASK
;
5410 pp_div
|= (DIV_ROUND_UP((seq
->t11_t12
+ 1), 1000)
5411 << BXT_POWER_CYCLE_DELAY_SHIFT
);
5413 pp_div
= ((100 * div
)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT
;
5414 pp_div
|= (DIV_ROUND_UP(seq
->t11_t12
, 1000)
5415 << PANEL_POWER_CYCLE_DELAY_SHIFT
);
5418 /* Haswell doesn't have any port selection bits for the panel
5419 * power sequencer any more. */
5420 if (IS_VALLEYVIEW(dev
)) {
5421 port_sel
= PANEL_PORT_SELECT_VLV(port
);
5422 } else if (HAS_PCH_IBX(dev
) || HAS_PCH_CPT(dev
)) {
5424 port_sel
= PANEL_PORT_SELECT_DPA
;
5426 port_sel
= PANEL_PORT_SELECT_DPD
;
5431 I915_WRITE(pp_on_reg
, pp_on
);
5432 I915_WRITE(pp_off_reg
, pp_off
);
5433 if (IS_BROXTON(dev
))
5434 I915_WRITE(pp_ctrl_reg
, pp_div
);
5436 I915_WRITE(pp_div_reg
, pp_div
);
5438 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5439 I915_READ(pp_on_reg
),
5440 I915_READ(pp_off_reg
),
5442 (I915_READ(pp_ctrl_reg
) & BXT_POWER_CYCLE_DELAY_MASK
) :
5443 I915_READ(pp_div_reg
));
5447 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5449 * @refresh_rate: RR to be programmed
5451 * This function gets called when refresh rate (RR) has to be changed from
5452 * one frequency to another. Switches can be between high and low RR
5453 * supported by the panel or to any other RR based on media playback (in
5454 * this case, RR value needs to be passed from user space).
5456 * The caller of this function needs to take a lock on dev_priv->drrs.
5458 static void intel_dp_set_drrs_state(struct drm_device
*dev
, int refresh_rate
)
5460 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5461 struct intel_encoder
*encoder
;
5462 struct intel_digital_port
*dig_port
= NULL
;
5463 struct intel_dp
*intel_dp
= dev_priv
->drrs
.dp
;
5464 struct intel_crtc_state
*config
= NULL
;
5465 struct intel_crtc
*intel_crtc
= NULL
;
5467 enum drrs_refresh_rate_type index
= DRRS_HIGH_RR
;
5469 if (refresh_rate
<= 0) {
5470 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5474 if (intel_dp
== NULL
) {
5475 DRM_DEBUG_KMS("DRRS not supported.\n");
5480 * FIXME: This needs proper synchronization with psr state for some
5481 * platforms that cannot have PSR and DRRS enabled at the same time.
5484 dig_port
= dp_to_dig_port(intel_dp
);
5485 encoder
= &dig_port
->base
;
5486 intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
5489 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5493 config
= intel_crtc
->config
;
5495 if (dev_priv
->drrs
.type
< SEAMLESS_DRRS_SUPPORT
) {
5496 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5500 if (intel_dp
->attached_connector
->panel
.downclock_mode
->vrefresh
==
5502 index
= DRRS_LOW_RR
;
5504 if (index
== dev_priv
->drrs
.refresh_rate_type
) {
5506 "DRRS requested for previously set RR...ignoring\n");
5510 if (!intel_crtc
->active
) {
5511 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5515 if (INTEL_INFO(dev
)->gen
>= 8 && !IS_CHERRYVIEW(dev
)) {
5518 intel_dp_set_m_n(intel_crtc
, M1_N1
);
5521 intel_dp_set_m_n(intel_crtc
, M2_N2
);
5525 DRM_ERROR("Unsupported refreshrate type\n");
5527 } else if (INTEL_INFO(dev
)->gen
> 6) {
5528 reg
= PIPECONF(intel_crtc
->config
->cpu_transcoder
);
5529 val
= I915_READ(reg
);
5531 if (index
> DRRS_HIGH_RR
) {
5532 if (IS_VALLEYVIEW(dev
))
5533 val
|= PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5535 val
|= PIPECONF_EDP_RR_MODE_SWITCH
;
5537 if (IS_VALLEYVIEW(dev
))
5538 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5540 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH
;
5542 I915_WRITE(reg
, val
);
5545 dev_priv
->drrs
.refresh_rate_type
= index
;
5547 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate
);
5551 * intel_edp_drrs_enable - init drrs struct if supported
5552 * @intel_dp: DP struct
5554 * Initializes frontbuffer_bits and drrs.dp
5556 void intel_edp_drrs_enable(struct intel_dp
*intel_dp
)
5558 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5559 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5560 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5561 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5562 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5564 if (!intel_crtc
->config
->has_drrs
) {
5565 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5569 mutex_lock(&dev_priv
->drrs
.mutex
);
5570 if (WARN_ON(dev_priv
->drrs
.dp
)) {
5571 DRM_ERROR("DRRS already enabled\n");
5575 dev_priv
->drrs
.busy_frontbuffer_bits
= 0;
5577 dev_priv
->drrs
.dp
= intel_dp
;
5580 mutex_unlock(&dev_priv
->drrs
.mutex
);
5584 * intel_edp_drrs_disable - Disable DRRS
5585 * @intel_dp: DP struct
5588 void intel_edp_drrs_disable(struct intel_dp
*intel_dp
)
5590 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5591 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5592 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5593 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5594 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5596 if (!intel_crtc
->config
->has_drrs
)
5599 mutex_lock(&dev_priv
->drrs
.mutex
);
5600 if (!dev_priv
->drrs
.dp
) {
5601 mutex_unlock(&dev_priv
->drrs
.mutex
);
5605 if (dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5606 intel_dp_set_drrs_state(dev_priv
->dev
,
5607 intel_dp
->attached_connector
->panel
.
5608 fixed_mode
->vrefresh
);
5610 dev_priv
->drrs
.dp
= NULL
;
5611 mutex_unlock(&dev_priv
->drrs
.mutex
);
5613 cancel_delayed_work_sync(&dev_priv
->drrs
.work
);
5616 static void intel_edp_drrs_downclock_work(struct work_struct
*work
)
5618 struct drm_i915_private
*dev_priv
=
5619 container_of(work
, typeof(*dev_priv
), drrs
.work
.work
);
5620 struct intel_dp
*intel_dp
;
5622 mutex_lock(&dev_priv
->drrs
.mutex
);
5624 intel_dp
= dev_priv
->drrs
.dp
;
5630 * The delayed work can race with an invalidate hence we need to
5634 if (dev_priv
->drrs
.busy_frontbuffer_bits
)
5637 if (dev_priv
->drrs
.refresh_rate_type
!= DRRS_LOW_RR
)
5638 intel_dp_set_drrs_state(dev_priv
->dev
,
5639 intel_dp
->attached_connector
->panel
.
5640 downclock_mode
->vrefresh
);
5643 mutex_unlock(&dev_priv
->drrs
.mutex
);
5647 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5649 * @frontbuffer_bits: frontbuffer plane tracking bits
5651 * This function gets called everytime rendering on the given planes start.
5652 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5654 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5656 void intel_edp_drrs_invalidate(struct drm_device
*dev
,
5657 unsigned frontbuffer_bits
)
5659 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5660 struct drm_crtc
*crtc
;
5663 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5666 cancel_delayed_work(&dev_priv
->drrs
.work
);
5668 mutex_lock(&dev_priv
->drrs
.mutex
);
5669 if (!dev_priv
->drrs
.dp
) {
5670 mutex_unlock(&dev_priv
->drrs
.mutex
);
5674 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5675 pipe
= to_intel_crtc(crtc
)->pipe
;
5677 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5678 dev_priv
->drrs
.busy_frontbuffer_bits
|= frontbuffer_bits
;
5680 /* invalidate means busy screen hence upclock */
5681 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5682 intel_dp_set_drrs_state(dev_priv
->dev
,
5683 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5684 fixed_mode
->vrefresh
);
5686 mutex_unlock(&dev_priv
->drrs
.mutex
);
5690 * intel_edp_drrs_flush - Restart Idleness DRRS
5692 * @frontbuffer_bits: frontbuffer plane tracking bits
5694 * This function gets called every time rendering on the given planes has
5695 * completed or flip on a crtc is completed. So DRRS should be upclocked
5696 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5697 * if no other planes are dirty.
5699 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5701 void intel_edp_drrs_flush(struct drm_device
*dev
,
5702 unsigned frontbuffer_bits
)
5704 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5705 struct drm_crtc
*crtc
;
5708 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5711 cancel_delayed_work(&dev_priv
->drrs
.work
);
5713 mutex_lock(&dev_priv
->drrs
.mutex
);
5714 if (!dev_priv
->drrs
.dp
) {
5715 mutex_unlock(&dev_priv
->drrs
.mutex
);
5719 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5720 pipe
= to_intel_crtc(crtc
)->pipe
;
5722 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5723 dev_priv
->drrs
.busy_frontbuffer_bits
&= ~frontbuffer_bits
;
5725 /* flush means busy screen hence upclock */
5726 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5727 intel_dp_set_drrs_state(dev_priv
->dev
,
5728 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5729 fixed_mode
->vrefresh
);
5732 * flush also means no more activity hence schedule downclock, if all
5733 * other fbs are quiescent too
5735 if (!dev_priv
->drrs
.busy_frontbuffer_bits
)
5736 schedule_delayed_work(&dev_priv
->drrs
.work
,
5737 msecs_to_jiffies(1000));
5738 mutex_unlock(&dev_priv
->drrs
.mutex
);
5742 * DOC: Display Refresh Rate Switching (DRRS)
5744 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5745 * which enables swtching between low and high refresh rates,
5746 * dynamically, based on the usage scenario. This feature is applicable
5747 * for internal panels.
5749 * Indication that the panel supports DRRS is given by the panel EDID, which
5750 * would list multiple refresh rates for one resolution.
5752 * DRRS is of 2 types - static and seamless.
5753 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5754 * (may appear as a blink on screen) and is used in dock-undock scenario.
5755 * Seamless DRRS involves changing RR without any visual effect to the user
5756 * and can be used during normal system usage. This is done by programming
5757 * certain registers.
5759 * Support for static/seamless DRRS may be indicated in the VBT based on
5760 * inputs from the panel spec.
5762 * DRRS saves power by switching to low RR based on usage scenarios.
5765 * The implementation is based on frontbuffer tracking implementation.
5766 * When there is a disturbance on the screen triggered by user activity or a
5767 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5768 * When there is no movement on screen, after a timeout of 1 second, a switch
5769 * to low RR is made.
5770 * For integration with frontbuffer tracking code,
5771 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5773 * DRRS can be further extended to support other internal panels and also
5774 * the scenario of video playback wherein RR is set based on the rate
5775 * requested by userspace.
5779 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5780 * @intel_connector: eDP connector
5781 * @fixed_mode: preferred mode of panel
5783 * This function is called only once at driver load to initialize basic
5787 * Downclock mode if panel supports it, else return NULL.
5788 * DRRS support is determined by the presence of downclock mode (apart
5789 * from VBT setting).
5791 static struct drm_display_mode
*
5792 intel_dp_drrs_init(struct intel_connector
*intel_connector
,
5793 struct drm_display_mode
*fixed_mode
)
5795 struct drm_connector
*connector
= &intel_connector
->base
;
5796 struct drm_device
*dev
= connector
->dev
;
5797 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5798 struct drm_display_mode
*downclock_mode
= NULL
;
5800 INIT_DELAYED_WORK(&dev_priv
->drrs
.work
, intel_edp_drrs_downclock_work
);
5801 mutex_init(&dev_priv
->drrs
.mutex
);
5803 if (INTEL_INFO(dev
)->gen
<= 6) {
5804 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5808 if (dev_priv
->vbt
.drrs_type
!= SEAMLESS_DRRS_SUPPORT
) {
5809 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5813 downclock_mode
= intel_find_panel_downclock
5814 (dev
, fixed_mode
, connector
);
5816 if (!downclock_mode
) {
5817 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5821 dev_priv
->drrs
.type
= dev_priv
->vbt
.drrs_type
;
5823 dev_priv
->drrs
.refresh_rate_type
= DRRS_HIGH_RR
;
5824 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5825 return downclock_mode
;
5828 static bool intel_edp_init_connector(struct intel_dp
*intel_dp
,
5829 struct intel_connector
*intel_connector
)
5831 struct drm_connector
*connector
= &intel_connector
->base
;
5832 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
5833 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5834 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5835 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5836 struct drm_display_mode
*fixed_mode
= NULL
;
5837 struct drm_display_mode
*downclock_mode
= NULL
;
5839 struct drm_display_mode
*scan
;
5841 enum pipe pipe
= INVALID_PIPE
;
5843 if (!is_edp(intel_dp
))
5847 intel_edp_panel_vdd_sanitize(intel_dp
);
5848 pps_unlock(intel_dp
);
5850 /* Cache DPCD and EDID for edp. */
5851 has_dpcd
= intel_dp_get_dpcd(intel_dp
);
5854 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11)
5855 dev_priv
->no_aux_handshake
=
5856 intel_dp
->dpcd
[DP_MAX_DOWNSPREAD
] &
5857 DP_NO_AUX_HANDSHAKE_LINK_TRAINING
;
5859 /* if this fails, presume the device is a ghost */
5860 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5864 /* We now know it's not a ghost, init power sequence regs. */
5866 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
5867 pps_unlock(intel_dp
);
5869 mutex_lock(&dev
->mode_config
.mutex
);
5870 edid
= drm_get_edid(connector
, &intel_dp
->aux
.ddc
);
5872 if (drm_add_edid_modes(connector
, edid
)) {
5873 drm_mode_connector_update_edid_property(connector
,
5875 drm_edid_to_eld(connector
, edid
);
5878 edid
= ERR_PTR(-EINVAL
);
5881 edid
= ERR_PTR(-ENOENT
);
5883 intel_connector
->edid
= edid
;
5885 /* prefer fixed mode from EDID if available */
5886 list_for_each_entry(scan
, &connector
->probed_modes
, head
) {
5887 if ((scan
->type
& DRM_MODE_TYPE_PREFERRED
)) {
5888 fixed_mode
= drm_mode_duplicate(dev
, scan
);
5889 downclock_mode
= intel_dp_drrs_init(
5890 intel_connector
, fixed_mode
);
5895 /* fallback to VBT if available for eDP */
5896 if (!fixed_mode
&& dev_priv
->vbt
.lfp_lvds_vbt_mode
) {
5897 fixed_mode
= drm_mode_duplicate(dev
,
5898 dev_priv
->vbt
.lfp_lvds_vbt_mode
);
5900 fixed_mode
->type
|= DRM_MODE_TYPE_PREFERRED
;
5902 mutex_unlock(&dev
->mode_config
.mutex
);
5904 if (IS_VALLEYVIEW(dev
)) {
5905 intel_dp
->edp_notifier
.notifier_call
= edp_notify_handler
;
5906 register_reboot_notifier(&intel_dp
->edp_notifier
);
5909 * Figure out the current pipe for the initial backlight setup.
5910 * If the current pipe isn't valid, try the PPS pipe, and if that
5911 * fails just assume pipe A.
5913 if (IS_CHERRYVIEW(dev
))
5914 pipe
= DP_PORT_TO_PIPE_CHV(intel_dp
->DP
);
5916 pipe
= PORT_TO_PIPE(intel_dp
->DP
);
5918 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5919 pipe
= intel_dp
->pps_pipe
;
5921 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5924 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5928 intel_panel_init(&intel_connector
->panel
, fixed_mode
, downclock_mode
);
5929 intel_connector
->panel
.backlight_power
= intel_edp_backlight_power
;
5930 intel_panel_setup_backlight(connector
, pipe
);
5936 intel_dp_init_connector(struct intel_digital_port
*intel_dig_port
,
5937 struct intel_connector
*intel_connector
)
5939 struct drm_connector
*connector
= &intel_connector
->base
;
5940 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5941 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5942 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5943 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5944 enum port port
= intel_dig_port
->port
;
5947 intel_dp
->pps_pipe
= INVALID_PIPE
;
5949 /* intel_dp vfuncs */
5950 if (INTEL_INFO(dev
)->gen
>= 9)
5951 intel_dp
->get_aux_clock_divider
= skl_get_aux_clock_divider
;
5952 else if (IS_VALLEYVIEW(dev
))
5953 intel_dp
->get_aux_clock_divider
= vlv_get_aux_clock_divider
;
5954 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
5955 intel_dp
->get_aux_clock_divider
= hsw_get_aux_clock_divider
;
5956 else if (HAS_PCH_SPLIT(dev
))
5957 intel_dp
->get_aux_clock_divider
= ilk_get_aux_clock_divider
;
5959 intel_dp
->get_aux_clock_divider
= i9xx_get_aux_clock_divider
;
5961 if (INTEL_INFO(dev
)->gen
>= 9)
5962 intel_dp
->get_aux_send_ctl
= skl_get_aux_send_ctl
;
5964 intel_dp
->get_aux_send_ctl
= i9xx_get_aux_send_ctl
;
5966 /* Preserve the current hw state. */
5967 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
);
5968 intel_dp
->attached_connector
= intel_connector
;
5970 if (intel_dp_is_edp(dev
, port
))
5971 type
= DRM_MODE_CONNECTOR_eDP
;
5973 type
= DRM_MODE_CONNECTOR_DisplayPort
;
5976 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5977 * for DP the encoder type can be set by the caller to
5978 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5980 if (type
== DRM_MODE_CONNECTOR_eDP
)
5981 intel_encoder
->type
= INTEL_OUTPUT_EDP
;
5983 /* eDP only on port B and/or C on vlv/chv */
5984 if (WARN_ON(IS_VALLEYVIEW(dev
) && is_edp(intel_dp
) &&
5985 port
!= PORT_B
&& port
!= PORT_C
))
5988 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5989 type
== DRM_MODE_CONNECTOR_eDP
? "eDP" : "DP",
5992 drm_connector_init(dev
, connector
, &intel_dp_connector_funcs
, type
);
5993 drm_connector_helper_add(connector
, &intel_dp_connector_helper_funcs
);
5995 connector
->interlace_allowed
= true;
5996 connector
->doublescan_allowed
= 0;
5998 INIT_DELAYED_WORK(&intel_dp
->panel_vdd_work
,
5999 edp_panel_vdd_work
);
6001 intel_connector_attach_encoder(intel_connector
, intel_encoder
);
6002 drm_connector_register(connector
);
6005 intel_connector
->get_hw_state
= intel_ddi_connector_get_hw_state
;
6007 intel_connector
->get_hw_state
= intel_connector_get_hw_state
;
6008 intel_connector
->unregister
= intel_dp_connector_unregister
;
6010 /* Set up the hotplug pin. */
6013 intel_encoder
->hpd_pin
= HPD_PORT_A
;
6016 intel_encoder
->hpd_pin
= HPD_PORT_B
;
6017 if (IS_BROXTON(dev_priv
) && (INTEL_REVID(dev
) < BXT_REVID_B0
))
6018 intel_encoder
->hpd_pin
= HPD_PORT_A
;
6021 intel_encoder
->hpd_pin
= HPD_PORT_C
;
6024 intel_encoder
->hpd_pin
= HPD_PORT_D
;
6030 if (is_edp(intel_dp
)) {
6032 intel_dp_init_panel_power_timestamps(intel_dp
);
6033 if (IS_VALLEYVIEW(dev
))
6034 vlv_initial_power_sequencer_setup(intel_dp
);
6036 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
6037 pps_unlock(intel_dp
);
6040 intel_dp_aux_init(intel_dp
, intel_connector
);
6042 /* init MST on ports that can support it */
6043 if (HAS_DP_MST(dev
) &&
6044 (port
== PORT_B
|| port
== PORT_C
|| port
== PORT_D
))
6045 intel_dp_mst_encoder_init(intel_dig_port
,
6046 intel_connector
->base
.base
.id
);
6048 if (!intel_edp_init_connector(intel_dp
, intel_connector
)) {
6049 drm_dp_aux_unregister(&intel_dp
->aux
);
6050 if (is_edp(intel_dp
)) {
6051 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
6053 * vdd might still be enabled do to the delayed vdd off.
6054 * Make sure vdd is actually turned off here.
6057 edp_panel_vdd_off_sync(intel_dp
);
6058 pps_unlock(intel_dp
);
6060 drm_connector_unregister(connector
);
6061 drm_connector_cleanup(connector
);
6065 intel_dp_add_properties(intel_dp
, connector
);
6067 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6068 * 0xd. Failure to do so will result in spurious interrupts being
6069 * generated on the port when a cable is not attached.
6071 if (IS_G4X(dev
) && !IS_GM45(dev
)) {
6072 u32 temp
= I915_READ(PEG_BAND_GAP_DATA
);
6073 I915_WRITE(PEG_BAND_GAP_DATA
, (temp
& ~0xf) | 0xd);
6076 i915_debugfs_connector_add(connector
);
6082 intel_dp_init(struct drm_device
*dev
, int output_reg
, enum port port
)
6084 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6085 struct intel_digital_port
*intel_dig_port
;
6086 struct intel_encoder
*intel_encoder
;
6087 struct drm_encoder
*encoder
;
6088 struct intel_connector
*intel_connector
;
6090 intel_dig_port
= kzalloc(sizeof(*intel_dig_port
), GFP_KERNEL
);
6091 if (!intel_dig_port
)
6094 intel_connector
= intel_connector_alloc();
6095 if (!intel_connector
) {
6096 kfree(intel_dig_port
);
6100 intel_encoder
= &intel_dig_port
->base
;
6101 encoder
= &intel_encoder
->base
;
6103 drm_encoder_init(dev
, &intel_encoder
->base
, &intel_dp_enc_funcs
,
6104 DRM_MODE_ENCODER_TMDS
);
6106 intel_encoder
->compute_config
= intel_dp_compute_config
;
6107 intel_encoder
->disable
= intel_disable_dp
;
6108 intel_encoder
->get_hw_state
= intel_dp_get_hw_state
;
6109 intel_encoder
->get_config
= intel_dp_get_config
;
6110 intel_encoder
->suspend
= intel_dp_encoder_suspend
;
6111 if (IS_CHERRYVIEW(dev
)) {
6112 intel_encoder
->pre_pll_enable
= chv_dp_pre_pll_enable
;
6113 intel_encoder
->pre_enable
= chv_pre_enable_dp
;
6114 intel_encoder
->enable
= vlv_enable_dp
;
6115 intel_encoder
->post_disable
= chv_post_disable_dp
;
6116 intel_encoder
->post_pll_disable
= chv_dp_post_pll_disable
;
6117 } else if (IS_VALLEYVIEW(dev
)) {
6118 intel_encoder
->pre_pll_enable
= vlv_dp_pre_pll_enable
;
6119 intel_encoder
->pre_enable
= vlv_pre_enable_dp
;
6120 intel_encoder
->enable
= vlv_enable_dp
;
6121 intel_encoder
->post_disable
= vlv_post_disable_dp
;
6123 intel_encoder
->pre_enable
= g4x_pre_enable_dp
;
6124 intel_encoder
->enable
= g4x_enable_dp
;
6125 if (INTEL_INFO(dev
)->gen
>= 5)
6126 intel_encoder
->post_disable
= ilk_post_disable_dp
;
6129 intel_dig_port
->port
= port
;
6130 intel_dig_port
->dp
.output_reg
= output_reg
;
6132 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
6133 if (IS_CHERRYVIEW(dev
)) {
6135 intel_encoder
->crtc_mask
= 1 << 2;
6137 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1);
6139 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1) | (1 << 2);
6141 intel_encoder
->cloneable
= 0;
6143 intel_dig_port
->hpd_pulse
= intel_dp_hpd_pulse
;
6144 dev_priv
->hotplug
.irq_port
[port
] = intel_dig_port
;
6146 if (!intel_dp_init_connector(intel_dig_port
, intel_connector
)) {
6147 drm_encoder_cleanup(encoder
);
6148 kfree(intel_dig_port
);
6149 kfree(intel_connector
);
6153 void intel_dp_mst_suspend(struct drm_device
*dev
)
6155 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6159 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6160 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6161 if (!intel_dig_port
)
6164 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6165 if (!intel_dig_port
->dp
.can_mst
)
6167 if (intel_dig_port
->dp
.is_mst
)
6168 drm_dp_mst_topology_mgr_suspend(&intel_dig_port
->dp
.mst_mgr
);
6173 void intel_dp_mst_resume(struct drm_device
*dev
)
6175 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6178 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6179 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6180 if (!intel_dig_port
)
6182 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6185 if (!intel_dig_port
->dp
.can_mst
)
6188 ret
= drm_dp_mst_topology_mgr_resume(&intel_dig_port
->dp
.mst_mgr
);
6190 intel_dp_check_mst_status(&intel_dig_port
->dp
);