drm/i915: Read out hrawclk from CCK on vlv/chv
drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we list only the fixed rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires us to program a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
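
/*
 * Illustrative check of the fixed-point encoding above (added for clarity,
 * not part of the original source): for the 162000 entry,
 * (32 << 22) | 1677722 == 0x819999a, i.e. m2 ~= 32.4 since
 * 1677722 / 2^22 ~= 0.4.
 */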

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

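/*
 * Quick sanity check of the helper above (illustrative, not from the
 * original source): lane_count 1 -> 0xe, 2 -> 0xc, 4 -> 0x0, i.e. the
 * mask of the four possible lanes that are left unused.
 */
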
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

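/*
 * Worked example (added for illustration, not part of the original file):
 * for the 1680x1050R case mentioned above,
 *
 *     intel_dp_link_required(119000, 18)  == (119000 * 18 + 9) / 10 == 214200
 *     intel_dp_max_data_rate(270000, 1)   == (270000 * 1 * 8) / 10  == 216000
 *
 * so that mode just fits on a single 2.7GHz lane (214200 <= 216000), matching
 * the eDP spec example the comment refers to.
 */
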
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

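/*
 * Illustrative example (not from the original source): the AUX data
 * registers hold bytes packed big-endian style, so packing the three bytes
 * { 0x12, 0x34, 0x56 } yields 0x12345600, and unpacking that value into a
 * 3-byte buffer returns the same three bytes.
 */
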
bf13e81b
JN
257static void
258intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 259 struct intel_dp *intel_dp);
bf13e81b
JN
260static void
261intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 262 struct intel_dp *intel_dp);
bf13e81b 263
773538e8
VS
264static void pps_lock(struct intel_dp *intel_dp)
265{
266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
267 struct intel_encoder *encoder = &intel_dig_port->base;
268 struct drm_device *dev = encoder->base.dev;
269 struct drm_i915_private *dev_priv = dev->dev_private;
270 enum intel_display_power_domain power_domain;
271
272 /*
273 * See vlv_power_sequencer_reset() why we need
274 * a power domain reference here.
275 */
25f78f58 276 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
277 intel_display_power_get(dev_priv, power_domain);
278
279 mutex_lock(&dev_priv->pps_mutex);
280}
281
282static void pps_unlock(struct intel_dp *intel_dp)
283{
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct intel_encoder *encoder = &intel_dig_port->base;
286 struct drm_device *dev = encoder->base.dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 enum intel_display_power_domain power_domain;
289
290 mutex_unlock(&dev_priv->pps_mutex);
291
25f78f58 292 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
293 intel_display_power_put(dev_priv, power_domain);
294}
295
961a0db0
VS
296static void
297vlv_power_sequencer_kick(struct intel_dp *intel_dp)
298{
299 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
300 struct drm_device *dev = intel_dig_port->base.base.dev;
301 struct drm_i915_private *dev_priv = dev->dev_private;
302 enum pipe pipe = intel_dp->pps_pipe;
0047eedc
VS
303 bool pll_enabled, release_cl_override = false;
304 enum dpio_phy phy = DPIO_PHY(pipe);
305 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
961a0db0
VS
306 uint32_t DP;
307
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;
312
313 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
314 pipe_name(pipe), port_name(intel_dig_port->port));
315
316 /* Preserve the BIOS-computed detected bit. This is
317 * supposed to be read-only.
318 */
319 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
320 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
321 DP |= DP_PORT_WIDTH(1);
322 DP |= DP_LINK_TRAIN_PAT_1;
323
324 if (IS_CHERRYVIEW(dev))
325 DP |= DP_PIPE_SELECT_CHV(pipe);
326 else if (pipe == PIPE_B)
327 DP |= DP_PIPEB_SELECT;
328
d288f65f
VS
329 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
330
	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
0047eedc
VS
335 if (!pll_enabled) {
336 release_cl_override = IS_CHERRYVIEW(dev) &&
337 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
338
3f36b937
TU
339 if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
340 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
341 DRM_ERROR("Failed to force on pll for pipe %c!\n",
342 pipe_name(pipe));
343 return;
344 }
0047eedc 345 }
d288f65f 346
961a0db0
VS
	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
355
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
358
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
d288f65f 361
0047eedc 362 if (!pll_enabled) {
d288f65f 363 vlv_force_pll_off(dev, pipe);
0047eedc
VS
364
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
367 }
961a0db0
VS
368}
369
bf13e81b
JN
370static enum pipe
371vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
372{
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 378 enum pipe pipe;
bf13e81b 379
e39b999a 380 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 381
a8c3344e
VS
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
384
a4a5d2f8
VS
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
387
	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
19c8054c 392 for_each_intel_encoder(dev, encoder) {
a4a5d2f8
VS
393 struct intel_dp *tmp;
394
395 if (encoder->type != INTEL_OUTPUT_EDP)
396 continue;
397
398 tmp = enc_to_intel_dp(&encoder->base);
399
400 if (tmp->pps_pipe != INVALID_PIPE)
401 pipes &= ~(1 << tmp->pps_pipe);
402 }
403
404 /*
405 * Didn't find one. This should not happen since there
406 * are two power sequencers and up to two eDP ports.
407 */
408 if (WARN_ON(pipes == 0))
a8c3344e
VS
409 pipe = PIPE_A;
410 else
411 pipe = ffs(pipes) - 1;
a4a5d2f8 412
a8c3344e
VS
413 vlv_steal_power_sequencer(dev, pipe);
414 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
415
416 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
417 pipe_name(intel_dp->pps_pipe),
418 port_name(intel_dig_port->port));
419
420 /* init power sequencer on this pipe and port */
36b5f425
VS
421 intel_dp_init_panel_power_sequencer(dev, intel_dp);
422 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 423
961a0db0
VS
424 /*
425 * Even vdd force doesn't work until we've made
426 * the power sequencer lock in on the port.
427 */
428 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
429
430 return intel_dp->pps_pipe;
431}
432
6491ab27
VS
433typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
434 enum pipe pipe);
435
436static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
437 enum pipe pipe)
438{
439 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
440}
441
442static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
443 enum pipe pipe)
444{
445 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
446}
447
448static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return true;
452}
bf13e81b 453
a4a5d2f8 454static enum pipe
6491ab27
VS
455vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
456 enum port port,
457 vlv_pipe_check pipe_check)
a4a5d2f8
VS
458{
459 enum pipe pipe;
bf13e81b 460
bf13e81b
JN
461 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
462 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
463 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
464
465 if (port_sel != PANEL_PORT_SELECT_VLV(port))
466 continue;
467
6491ab27
VS
468 if (!pipe_check(dev_priv, pipe))
469 continue;
470
a4a5d2f8 471 return pipe;
bf13e81b
JN
472 }
473
a4a5d2f8
VS
474 return INVALID_PIPE;
475}
476
477static void
478vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
479{
480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
481 struct drm_device *dev = intel_dig_port->base.base.dev;
482 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
483 enum port port = intel_dig_port->port;
484
485 lockdep_assert_held(&dev_priv->pps_mutex);
486
487 /* try to find a pipe with this port selected */
6491ab27
VS
488 /* first pick one where the panel is on */
489 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
490 vlv_pipe_has_pp_on);
491 /* didn't find one? pick one where vdd is on */
492 if (intel_dp->pps_pipe == INVALID_PIPE)
493 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
494 vlv_pipe_has_vdd_on);
495 /* didn't find one? pick one with just the correct port */
496 if (intel_dp->pps_pipe == INVALID_PIPE)
497 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
498 vlv_pipe_any);
a4a5d2f8
VS
499
500 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
501 if (intel_dp->pps_pipe == INVALID_PIPE) {
502 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
503 port_name(port));
504 return;
bf13e81b
JN
505 }
506
a4a5d2f8
VS
507 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
508 port_name(port), pipe_name(intel_dp->pps_pipe));
509
36b5f425
VS
510 intel_dp_init_panel_power_sequencer(dev, intel_dp);
511 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
512}
513
773538e8
VS
514void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
515{
516 struct drm_device *dev = dev_priv->dev;
517 struct intel_encoder *encoder;
518
666a4537 519 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
773538e8
VS
520 return;
521
522 /*
523 * We can't grab pps_mutex here due to deadlock with power_domain
524 * mutex when power_domain functions are called while holding pps_mutex.
525 * That also means that in order to use pps_pipe the code needs to
526 * hold both a power domain reference and pps_mutex, and the power domain
527 * reference get/put must be done while _not_ holding pps_mutex.
528 * pps_{lock,unlock}() do these steps in the correct order, so one
529 * should use them always.
530 */
531
19c8054c 532 for_each_intel_encoder(dev, encoder) {
773538e8
VS
533 struct intel_dp *intel_dp;
534
535 if (encoder->type != INTEL_OUTPUT_EDP)
536 continue;
537
538 intel_dp = enc_to_intel_dp(&encoder->base);
539 intel_dp->pps_pipe = INVALID_PIPE;
540 }
bf13e81b
JN
541}
542
f0f59a00
VS
543static i915_reg_t
544_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b
JN
545{
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
b0a08bec
VK
548 if (IS_BROXTON(dev))
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
551 return PCH_PP_CONTROL;
552 else
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554}
555
f0f59a00
VS
556static i915_reg_t
557_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b
JN
558{
559 struct drm_device *dev = intel_dp_to_dev(intel_dp);
560
b0a08bec
VK
561 if (IS_BROXTON(dev))
562 return BXT_PP_STATUS(0);
563 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
564 return PCH_PP_STATUS;
565 else
566 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
567}
568
01527b31
CT
/*
 * Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not to be
 * tracked.
 */
571static int edp_notify_handler(struct notifier_block *this, unsigned long code,
572 void *unused)
573{
574 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
575 edp_notifier);
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
577 struct drm_i915_private *dev_priv = dev->dev_private;
01527b31
CT
578
579 if (!is_edp(intel_dp) || code != SYS_RESTART)
580 return 0;
581
773538e8 582 pps_lock(intel_dp);
e39b999a 583
666a4537 584 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e39b999a 585 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
f0f59a00 586 i915_reg_t pp_ctrl_reg, pp_div_reg;
649636ef 587 u32 pp_div;
e39b999a 588
01527b31
CT
589 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
590 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
591 pp_div = I915_READ(pp_div_reg);
592 pp_div &= PP_REFERENCE_DIVIDER_MASK;
593
594 /* 0x1F write to PP_DIV_REG sets max cycle delay */
595 I915_WRITE(pp_div_reg, pp_div | 0x1F);
596 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
597 msleep(intel_dp->panel_power_cycle_delay);
598 }
599
773538e8 600 pps_unlock(intel_dp);
e39b999a 601
01527b31
CT
602 return 0;
603}
604
4be73780 605static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 606{
30add22d 607 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
608 struct drm_i915_private *dev_priv = dev->dev_private;
609
e39b999a
VS
610 lockdep_assert_held(&dev_priv->pps_mutex);
611
666a4537 612 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
613 intel_dp->pps_pipe == INVALID_PIPE)
614 return false;
615
bf13e81b 616 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
617}
618
4be73780 619static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 620{
30add22d 621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
622 struct drm_i915_private *dev_priv = dev->dev_private;
623
e39b999a
VS
624 lockdep_assert_held(&dev_priv->pps_mutex);
625
666a4537 626 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
627 intel_dp->pps_pipe == INVALID_PIPE)
628 return false;
629
773538e8 630 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
631}
632
9b984dae
KP
633static void
634intel_dp_check_edp(struct intel_dp *intel_dp)
635{
30add22d 636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 637 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 638
9b984dae
KP
639 if (!is_edp(intel_dp))
640 return;
453c5420 641
4be73780 642 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
643 WARN(1, "eDP powered off while attempting aux channel communication.\n");
644 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
645 I915_READ(_pp_stat_reg(intel_dp)),
646 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
647 }
648}
649
9ee32fea
DV
650static uint32_t
651intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
652{
653 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
654 struct drm_device *dev = intel_dig_port->base.base.dev;
655 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 656 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
657 uint32_t status;
658 bool done;
659
ef04f00d 660#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 661 if (has_aux_irq)
b18ac466 662 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 663 msecs_to_jiffies_timeout(10));
9ee32fea
DV
664 else
665 done = wait_for_atomic(C, 10) == 0;
666 if (!done)
667 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
668 has_aux_irq);
669#undef C
670
671 return status;
672}
673
6ffb1be7 674static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 675{
174edf1f 676 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
e7dc33f3 677 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
9ee32fea 678
ec5b01dd
DL
679 /*
680 * The clock divider is based off the hrawclk, and would like to run at
681 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 682 */
e7dc33f3 683 return index ? 0 : DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
ec5b01dd
DL
684}
685
686static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687{
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 690 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
691
692 if (index)
693 return 0;
694
e7dc33f3 695 if (intel_dig_port->port == PORT_A)
fce18c4c 696 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
e7dc33f3
VS
697 else
698 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
ec5b01dd
DL
699}
700
701static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
702{
703 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704 struct drm_device *dev = intel_dig_port->base.base.dev;
705 struct drm_i915_private *dev_priv = dev->dev_private;
706
707 if (intel_dig_port->port == PORT_A) {
708 if (index)
709 return 0;
05024da3 710 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
56f5f700 711 } else if (HAS_PCH_LPT_H(dev_priv)) {
2c55c336 712 /* Workaround for non-ULT HSW */
bc86625a
CW
713 switch (index) {
714 case 0: return 63;
715 case 1: return 72;
716 default: return 0;
717 }
ec5b01dd 718 } else {
e7dc33f3 719 return index ? 0 : DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
2c55c336 720 }
b84a1cf8
RV
721}
722
b6b5e383
DL
723static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
724{
725 /*
726 * SKL doesn't need us to program the AUX clock divider (Hardware will
727 * derive the clock from CDCLK automatically). We still implement the
728 * get_aux_clock_divider vfunc to plug-in into the existing code.
729 */
730 return index ? 0 : 1;
731}
732
6ffb1be7
VS
733static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
734 bool has_aux_irq,
735 int send_bytes,
736 uint32_t aux_clock_divider)
5ed12a19
DL
737{
738 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
739 struct drm_device *dev = intel_dig_port->base.base.dev;
740 uint32_t precharge, timeout;
741
742 if (IS_GEN6(dev))
743 precharge = 3;
744 else
745 precharge = 5;
746
f3c6a3a7 747 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
5ed12a19
DL
748 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
749 else
750 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
751
752 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 753 DP_AUX_CH_CTL_DONE |
5ed12a19 754 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 755 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 756 timeout |
788d4433 757 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
758 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
759 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 760 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
761}
762
b9ca5fad
DL
763static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
764 bool has_aux_irq,
765 int send_bytes,
766 uint32_t unused)
767{
768 return DP_AUX_CH_CTL_SEND_BUSY |
769 DP_AUX_CH_CTL_DONE |
770 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
771 DP_AUX_CH_CTL_TIME_OUT_ERROR |
772 DP_AUX_CH_CTL_TIME_OUT_1600us |
773 DP_AUX_CH_CTL_RECEIVE_ERROR |
774 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
775 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
776}
777
b84a1cf8
RV
778static int
779intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 780 const uint8_t *send, int send_bytes,
b84a1cf8
RV
781 uint8_t *recv, int recv_size)
782{
783 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
784 struct drm_device *dev = intel_dig_port->base.base.dev;
785 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 786 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
bc86625a 787 uint32_t aux_clock_divider;
b84a1cf8
RV
788 int i, ret, recv_bytes;
789 uint32_t status;
5ed12a19 790 int try, clock = 0;
4e6b788c 791 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
792 bool vdd;
793
773538e8 794 pps_lock(intel_dp);
e39b999a 795
72c3500a
VS
796 /*
797 * We will be called with VDD already enabled for dpcd/edid/oui reads.
798 * In such cases we want to leave VDD enabled and it's up to upper layers
799 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
800 * ourselves.
801 */
1e0560e0 802 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
803
804 /* dp aux is extremely sensitive to irq latency, hence request the
805 * lowest possible wakeup latency and so prevent the cpu from going into
806 * deep sleep states.
807 */
808 pm_qos_update_request(&dev_priv->pm_qos, 0);
809
810 intel_dp_check_edp(intel_dp);
5eb08b69 811
11bee43e
JB
812 /* Try to wait for any previous AUX channel activity */
813 for (try = 0; try < 3; try++) {
ef04f00d 814 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
815 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
816 break;
817 msleep(1);
818 }
819
820 if (try == 3) {
02196c77
MK
821 static u32 last_status = -1;
822 const u32 status = I915_READ(ch_ctl);
823
824 if (status != last_status) {
825 WARN(1, "dp_aux_ch not started status 0x%08x\n",
826 status);
827 last_status = status;
828 }
829
9ee32fea
DV
830 ret = -EBUSY;
831 goto out;
4f7f7b7e
CW
832 }
833
46a5ae9f
PZ
834 /* Only 5 data registers! */
835 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
836 ret = -E2BIG;
837 goto out;
838 }
839
ec5b01dd 840 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
841 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
842 has_aux_irq,
843 send_bytes,
844 aux_clock_divider);
5ed12a19 845
bc86625a
CW
846 /* Must try at least 3 times according to DP spec */
847 for (try = 0; try < 5; try++) {
848 /* Load the send data into the aux channel data registers */
849 for (i = 0; i < send_bytes; i += 4)
330e20ec 850 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
a4f1289e
RV
851 intel_dp_pack_aux(send + i,
852 send_bytes - i));
bc86625a
CW
853
854 /* Send the command and wait for it to complete */
5ed12a19 855 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
856
857 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
858
859 /* Clear done status and any errors */
860 I915_WRITE(ch_ctl,
861 status |
862 DP_AUX_CH_CTL_DONE |
863 DP_AUX_CH_CTL_TIME_OUT_ERROR |
864 DP_AUX_CH_CTL_RECEIVE_ERROR);
865
74ebf294 866 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 867 continue;
74ebf294
TP
868
869 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
870 * 400us delay required for errors and timeouts
871 * Timeout errors from the HW already meet this
872 * requirement so skip to next iteration
873 */
874 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
875 usleep_range(400, 500);
bc86625a 876 continue;
74ebf294 877 }
bc86625a 878 if (status & DP_AUX_CH_CTL_DONE)
e058c945 879 goto done;
bc86625a 880 }
a4fc5ed6
KP
881 }
882
a4fc5ed6 883 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 884 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
885 ret = -EBUSY;
886 goto out;
a4fc5ed6
KP
887 }
888
e058c945 889done:
a4fc5ed6
KP
890 /* Check for timeout or receive error.
891 * Timeouts occur when the sink is not connected
892 */
a5b3da54 893 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 894 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
895 ret = -EIO;
896 goto out;
a5b3da54 897 }
1ae8c0a5
KP
898
899 /* Timeouts occur when the device isn't connected, so they're
900 * "normal" -- don't fill the kernel log with these */
a5b3da54 901 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 902 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
903 ret = -ETIMEDOUT;
904 goto out;
a4fc5ed6
KP
905 }
906
907 /* Unload any bytes sent back from the other side */
908 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
909 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
14e01889
RV
910
911 /*
912 * By BSpec: "Message sizes of 0 or >20 are not allowed."
913 * We have no idea of what happened so we return -EBUSY so
914 * drm layer takes care for the necessary retries.
915 */
916 if (recv_bytes == 0 || recv_bytes > 20) {
917 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
918 recv_bytes);
919 /*
920 * FIXME: This patch was created on top of a series that
921 * organize the retries at drm level. There EBUSY should
922 * also take care for 1ms wait before retrying.
923 * That aux retries re-org is still needed and after that is
924 * merged we remove this sleep from here.
925 */
926 usleep_range(1000, 1500);
927 ret = -EBUSY;
928 goto out;
929 }
930
a4fc5ed6
KP
931 if (recv_bytes > recv_size)
932 recv_bytes = recv_size;
0206e353 933
4f7f7b7e 934 for (i = 0; i < recv_bytes; i += 4)
330e20ec 935 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
a4f1289e 936 recv + i, recv_bytes - i);
a4fc5ed6 937
9ee32fea
DV
938 ret = recv_bytes;
939out:
940 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
941
884f19e9
JN
942 if (vdd)
943 edp_panel_vdd_off(intel_dp, false);
944
773538e8 945 pps_unlock(intel_dp);
e39b999a 946
9ee32fea 947 return ret;
a4fc5ed6
KP
948}
949
a6c8aff0
JN
950#define BARE_ADDRESS_SIZE 3
951#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
952static ssize_t
953intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 954{
9d1a1031
JN
955 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
956 uint8_t txbuf[20], rxbuf[20];
957 size_t txsize, rxsize;
a4fc5ed6 958 int ret;
a4fc5ed6 959
d2d9cbbd
VS
960 txbuf[0] = (msg->request << 4) |
961 ((msg->address >> 16) & 0xf);
962 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
963 txbuf[2] = msg->address & 0xff;
964 txbuf[3] = msg->size - 1;
46a5ae9f 965
9d1a1031
JN
966 switch (msg->request & ~DP_AUX_I2C_MOT) {
967 case DP_AUX_NATIVE_WRITE:
968 case DP_AUX_I2C_WRITE:
c1e74122 969 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
a6c8aff0 970 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 971 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 972
9d1a1031
JN
973 if (WARN_ON(txsize > 20))
974 return -E2BIG;
a4fc5ed6 975
d81a67cc
ID
976 if (msg->buffer)
977 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
978 else
979 WARN_ON(msg->size);
a4fc5ed6 980
9d1a1031
JN
981 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
982 if (ret > 0) {
983 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 984
a1ddefd8
JN
985 if (ret > 1) {
986 /* Number of bytes written in a short write. */
987 ret = clamp_t(int, rxbuf[1], 0, msg->size);
988 } else {
989 /* Return payload size. */
990 ret = msg->size;
991 }
9d1a1031
JN
992 }
993 break;
46a5ae9f 994
9d1a1031
JN
995 case DP_AUX_NATIVE_READ:
996 case DP_AUX_I2C_READ:
a6c8aff0 997 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 998 rxsize = msg->size + 1;
a4fc5ed6 999
9d1a1031
JN
1000 if (WARN_ON(rxsize > 20))
1001 return -E2BIG;
a4fc5ed6 1002
9d1a1031
JN
1003 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1004 if (ret > 0) {
1005 msg->reply = rxbuf[0] >> 4;
1006 /*
1007 * Assume happy day, and copy the data. The caller is
1008 * expected to check msg->reply before touching it.
1009 *
1010 * Return payload size.
1011 */
1012 ret--;
1013 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1014 }
9d1a1031
JN
1015 break;
1016
1017 default:
1018 ret = -EINVAL;
1019 break;
a4fc5ed6 1020 }
f51a44b9 1021
9d1a1031 1022 return ret;
a4fc5ed6
KP
1023}
1024
f0f59a00
VS
1025static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1026 enum port port)
da00bdcf
VS
1027{
1028 switch (port) {
1029 case PORT_B:
1030 case PORT_C:
1031 case PORT_D:
1032 return DP_AUX_CH_CTL(port);
1033 default:
1034 MISSING_CASE(port);
1035 return DP_AUX_CH_CTL(PORT_B);
1036 }
1037}
1038
f0f59a00
VS
1039static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1040 enum port port, int index)
330e20ec
VS
1041{
1042 switch (port) {
1043 case PORT_B:
1044 case PORT_C:
1045 case PORT_D:
1046 return DP_AUX_CH_DATA(port, index);
1047 default:
1048 MISSING_CASE(port);
1049 return DP_AUX_CH_DATA(PORT_B, index);
1050 }
1051}
1052
f0f59a00
VS
1053static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1054 enum port port)
da00bdcf
VS
1055{
1056 switch (port) {
1057 case PORT_A:
1058 return DP_AUX_CH_CTL(port);
1059 case PORT_B:
1060 case PORT_C:
1061 case PORT_D:
1062 return PCH_DP_AUX_CH_CTL(port);
1063 default:
1064 MISSING_CASE(port);
1065 return DP_AUX_CH_CTL(PORT_A);
1066 }
1067}
1068
f0f59a00
VS
1069static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1070 enum port port, int index)
330e20ec
VS
1071{
1072 switch (port) {
1073 case PORT_A:
1074 return DP_AUX_CH_DATA(port, index);
1075 case PORT_B:
1076 case PORT_C:
1077 case PORT_D:
1078 return PCH_DP_AUX_CH_DATA(port, index);
1079 default:
1080 MISSING_CASE(port);
1081 return DP_AUX_CH_DATA(PORT_A, index);
1082 }
1083}
1084
da00bdcf
VS
1085/*
1086 * On SKL we don't have Aux for port E so we rely
1087 * on VBT to set a proper alternate aux channel.
1088 */
1089static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1090{
1091 const struct ddi_vbt_port_info *info =
1092 &dev_priv->vbt.ddi_port_info[PORT_E];
1093
1094 switch (info->alternate_aux_channel) {
1095 case DP_AUX_A:
1096 return PORT_A;
1097 case DP_AUX_B:
1098 return PORT_B;
1099 case DP_AUX_C:
1100 return PORT_C;
1101 case DP_AUX_D:
1102 return PORT_D;
1103 default:
1104 MISSING_CASE(info->alternate_aux_channel);
1105 return PORT_A;
1106 }
1107}
1108
f0f59a00
VS
1109static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1110 enum port port)
da00bdcf
VS
1111{
1112 if (port == PORT_E)
1113 port = skl_porte_aux_port(dev_priv);
1114
1115 switch (port) {
1116 case PORT_A:
1117 case PORT_B:
1118 case PORT_C:
1119 case PORT_D:
1120 return DP_AUX_CH_CTL(port);
1121 default:
1122 MISSING_CASE(port);
1123 return DP_AUX_CH_CTL(PORT_A);
1124 }
1125}
1126
f0f59a00
VS
1127static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1128 enum port port, int index)
330e20ec
VS
1129{
1130 if (port == PORT_E)
1131 port = skl_porte_aux_port(dev_priv);
1132
1133 switch (port) {
1134 case PORT_A:
1135 case PORT_B:
1136 case PORT_C:
1137 case PORT_D:
1138 return DP_AUX_CH_DATA(port, index);
1139 default:
1140 MISSING_CASE(port);
1141 return DP_AUX_CH_DATA(PORT_A, index);
1142 }
1143}
1144
f0f59a00
VS
1145static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1146 enum port port)
330e20ec
VS
1147{
1148 if (INTEL_INFO(dev_priv)->gen >= 9)
1149 return skl_aux_ctl_reg(dev_priv, port);
1150 else if (HAS_PCH_SPLIT(dev_priv))
1151 return ilk_aux_ctl_reg(dev_priv, port);
1152 else
1153 return g4x_aux_ctl_reg(dev_priv, port);
1154}
1155
f0f59a00
VS
1156static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1157 enum port port, int index)
330e20ec
VS
1158{
1159 if (INTEL_INFO(dev_priv)->gen >= 9)
1160 return skl_aux_data_reg(dev_priv, port, index);
1161 else if (HAS_PCH_SPLIT(dev_priv))
1162 return ilk_aux_data_reg(dev_priv, port, index);
1163 else
1164 return g4x_aux_data_reg(dev_priv, port, index);
1165}
1166
1167static void intel_aux_reg_init(struct intel_dp *intel_dp)
1168{
1169 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1170 enum port port = dp_to_dig_port(intel_dp)->port;
1171 int i;
1172
1173 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1174 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1175 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1176}
1177
9d1a1031 1178static void
a121f4e5
VS
1179intel_dp_aux_fini(struct intel_dp *intel_dp)
1180{
1181 drm_dp_aux_unregister(&intel_dp->aux);
1182 kfree(intel_dp->aux.name);
1183}
1184
1185static int
9d1a1031
JN
1186intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1187{
1188 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1189 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1190 enum port port = intel_dig_port->port;
ab2c0672
DA
1191 int ret;
1192
330e20ec 1193 intel_aux_reg_init(intel_dp);
8316f337 1194
a121f4e5
VS
1195 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1196 if (!intel_dp->aux.name)
1197 return -ENOMEM;
1198
9d1a1031
JN
1199 intel_dp->aux.dev = dev->dev;
1200 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1201
a121f4e5
VS
1202 DRM_DEBUG_KMS("registering %s bus for %s\n",
1203 intel_dp->aux.name,
0b99836f 1204 connector->base.kdev->kobj.name);
8316f337 1205
4f71d0cb 1206 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1207 if (ret < 0) {
4f71d0cb 1208 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
a121f4e5
VS
1209 intel_dp->aux.name, ret);
1210 kfree(intel_dp->aux.name);
1211 return ret;
ab2c0672 1212 }
8a5e6aeb 1213
0b99836f
JN
1214 ret = sysfs_create_link(&connector->base.kdev->kobj,
1215 &intel_dp->aux.ddc.dev.kobj,
1216 intel_dp->aux.ddc.dev.kobj.name);
1217 if (ret < 0) {
a121f4e5
VS
1218 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1219 intel_dp->aux.name, ret);
1220 intel_dp_aux_fini(intel_dp);
1221 return ret;
ab2c0672 1222 }
a121f4e5
VS
1223
1224 return 0;
a4fc5ed6
KP
1225}
1226
80f65de3
ID
1227static void
1228intel_dp_connector_unregister(struct intel_connector *intel_connector)
1229{
1230 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1231
0e32b39c
DA
1232 if (!intel_connector->mst_port)
1233 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1234 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1235 intel_connector_unregister(intel_connector);
1236}
1237
5416d871 1238static void
840b32b7 1239skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
5416d871
DL
1240{
1241 u32 ctrl1;
1242
dd3cd74a
ACO
1243 memset(&pipe_config->dpll_hw_state, 0,
1244 sizeof(pipe_config->dpll_hw_state));
1245
5416d871
DL
1246 pipe_config->ddi_pll_sel = SKL_DPLL0;
1247 pipe_config->dpll_hw_state.cfgcr1 = 0;
1248 pipe_config->dpll_hw_state.cfgcr2 = 0;
1249
1250 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
840b32b7 1251 switch (pipe_config->port_clock / 2) {
c3346ef6 1252 case 81000:
71cd8423 1253 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1254 SKL_DPLL0);
1255 break;
c3346ef6 1256 case 135000:
71cd8423 1257 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1258 SKL_DPLL0);
1259 break;
c3346ef6 1260 case 270000:
71cd8423 1261 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1262 SKL_DPLL0);
1263 break;
c3346ef6 1264 case 162000:
71cd8423 1265 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1266 SKL_DPLL0);
1267 break;
1268 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1269 results in CDCLK change. Need to handle the change of CDCLK by
1270 disabling pipes and re-enabling them */
1271 case 108000:
71cd8423 1272 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1273 SKL_DPLL0);
1274 break;
1275 case 216000:
71cd8423 1276 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1277 SKL_DPLL0);
1278 break;
1279
5416d871
DL
1280 }
1281 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1282}
1283
6fa2d197 1284void
840b32b7 1285hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
0e50338c 1286{
ee46f3c7
ACO
1287 memset(&pipe_config->dpll_hw_state, 0,
1288 sizeof(pipe_config->dpll_hw_state));
1289
840b32b7
VS
1290 switch (pipe_config->port_clock / 2) {
1291 case 81000:
0e50338c
DV
1292 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1293 break;
840b32b7 1294 case 135000:
0e50338c
DV
1295 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1296 break;
840b32b7 1297 case 270000:
0e50338c
DV
1298 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1299 break;
1300 }
1301}
1302
fc0f8e25 1303static int
12f6a2e2 1304intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1305{
94ca719e
VS
1306 if (intel_dp->num_sink_rates) {
1307 *sink_rates = intel_dp->sink_rates;
1308 return intel_dp->num_sink_rates;
fc0f8e25 1309 }
12f6a2e2
VS
1310
1311 *sink_rates = default_rates;
1312
1313 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1314}
1315
e588fa18 1316bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1317{
e588fa18
ACO
1318 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1319 struct drm_device *dev = dig_port->base.base.dev;
1320
ed63baaf 1321 /* WaDisableHBR2:skl */
e87a005d 1322 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
ed63baaf
TS
1323 return false;
1324
1325 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1326 (INTEL_INFO(dev)->gen >= 9))
1327 return true;
1328 else
1329 return false;
1330}
1331
a8f3ef61 1332static int
e588fa18 1333intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
a8f3ef61 1334{
e588fa18
ACO
1335 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1336 struct drm_device *dev = dig_port->base.base.dev;
af7080f5
TS
1337 int size;
1338
64987fc5
SJ
1339 if (IS_BROXTON(dev)) {
1340 *source_rates = bxt_rates;
af7080f5 1341 size = ARRAY_SIZE(bxt_rates);
ef11bdb3 1342 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
637a9c63 1343 *source_rates = skl_rates;
af7080f5
TS
1344 size = ARRAY_SIZE(skl_rates);
1345 } else {
1346 *source_rates = default_rates;
1347 size = ARRAY_SIZE(default_rates);
a8f3ef61 1348 }
636280ba 1349
ed63baaf 1350 /* This depends on the fact that 5.4 is last value in the array */
e588fa18 1351 if (!intel_dp_source_supports_hbr2(intel_dp))
af7080f5 1352 size--;
636280ba 1353
af7080f5 1354 return size;
a8f3ef61
SJ
1355}
1356
c6bb3538
DV
1357static void
1358intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1359 struct intel_crtc_state *pipe_config)
c6bb3538
DV
1360{
1361 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1362 const struct dp_link_dpll *divisor = NULL;
1363 int i, count = 0;
c6bb3538
DV
1364
1365 if (IS_G4X(dev)) {
9dd4ffdf
CML
1366 divisor = gen4_dpll;
1367 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1368 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1369 divisor = pch_dpll;
1370 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1371 } else if (IS_CHERRYVIEW(dev)) {
1372 divisor = chv_dpll;
1373 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1374 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1375 divisor = vlv_dpll;
1376 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1377 }
9dd4ffdf
CML
1378
1379 if (divisor && count) {
1380 for (i = 0; i < count; i++) {
840b32b7 1381 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1382 pipe_config->dpll = divisor[i].dpll;
1383 pipe_config->clock_set = true;
1384 break;
1385 }
1386 }
c6bb3538
DV
1387 }
1388}
1389
2ecae76a
VS
1390static int intersect_rates(const int *source_rates, int source_len,
1391 const int *sink_rates, int sink_len,
94ca719e 1392 int *common_rates)
a8f3ef61
SJ
1393{
1394 int i = 0, j = 0, k = 0;
1395
a8f3ef61
SJ
1396 while (i < source_len && j < sink_len) {
1397 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1398 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1399 return k;
94ca719e 1400 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1401 ++k;
1402 ++i;
1403 ++j;
1404 } else if (source_rates[i] < sink_rates[j]) {
1405 ++i;
1406 } else {
1407 ++j;
1408 }
1409 }
1410 return k;
1411}
1412
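/*
 * Illustrative example for intersect_rates() above (added for clarity, not
 * part of the original source): the merge-style walk assumes both arrays are
 * sorted ascending, so intersecting default_rates { 162000, 270000, 540000 }
 * with a sink reporting { 162000, 270000 } fills common_rates with
 * { 162000, 270000 } and returns k == 2.
 */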
94ca719e
VS
1413static int intel_dp_common_rates(struct intel_dp *intel_dp,
1414 int *common_rates)
2ecae76a 1415{
2ecae76a
VS
1416 const int *source_rates, *sink_rates;
1417 int source_len, sink_len;
1418
1419 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
e588fa18 1420 source_len = intel_dp_source_rates(intel_dp, &source_rates);
2ecae76a
VS
1421
1422 return intersect_rates(source_rates, source_len,
1423 sink_rates, sink_len,
94ca719e 1424 common_rates);
2ecae76a
VS
1425}
1426
0336400e
VS
1427static void snprintf_int_array(char *str, size_t len,
1428 const int *array, int nelem)
1429{
1430 int i;
1431
1432 str[0] = '\0';
1433
1434 for (i = 0; i < nelem; i++) {
b2f505be 1435 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1436 if (r >= len)
1437 return;
1438 str += r;
1439 len -= r;
1440 }
1441}
1442
1443static void intel_dp_print_rates(struct intel_dp *intel_dp)
1444{
0336400e 1445 const int *source_rates, *sink_rates;
94ca719e
VS
1446 int source_len, sink_len, common_len;
1447 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1448 char str[128]; /* FIXME: too big for stack? */
1449
1450 if ((drm_debug & DRM_UT_KMS) == 0)
1451 return;
1452
e588fa18 1453 source_len = intel_dp_source_rates(intel_dp, &source_rates);
0336400e
VS
1454 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1455 DRM_DEBUG_KMS("source rates: %s\n", str);
1456
1457 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1458 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1459 DRM_DEBUG_KMS("sink rates: %s\n", str);
1460
94ca719e
VS
1461 common_len = intel_dp_common_rates(intel_dp, common_rates);
1462 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1463 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1464}
1465
f4896f15 1466static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1467{
1468 int i = 0;
1469
1470 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1471 if (find == rates[i])
1472 break;
1473
1474 return i;
1475}
1476
50fec21a
VS
1477int
1478intel_dp_max_link_rate(struct intel_dp *intel_dp)
1479{
1480 int rates[DP_MAX_SUPPORTED_RATES] = {};
1481 int len;
1482
94ca719e 1483 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1484 if (WARN_ON(len <= 0))
1485 return 162000;
1486
1487 return rates[rate_to_index(0, rates) - 1];
1488}
1489
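/*
 * Note (added for clarity, not part of the original source): rates[] above is
 * zero-initialized and filled with the sorted common rates, so
 * rate_to_index(0, rates) finds the first unused slot and the entry just
 * before it is the highest common link rate.
 */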
ed4e9c1d
VS
1490int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1491{
94ca719e 1492 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1493}
1494
94223d04
ACO
1495void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1496 uint8_t *link_bw, uint8_t *rate_select)
04a60f9f
VS
1497{
1498 if (intel_dp->num_sink_rates) {
1499 *link_bw = 0;
1500 *rate_select =
1501 intel_dp_rate_select(intel_dp, port_clock);
1502 } else {
1503 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1504 *rate_select = 0;
1505 }
1506}
1507
00c09d70 1508bool
5bfe2ac0 1509intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1510 struct intel_crtc_state *pipe_config)
a4fc5ed6 1511{
5bfe2ac0 1512 struct drm_device *dev = encoder->base.dev;
36008365 1513 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1514 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1515 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1516 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1517 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1518 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1519 int lane_count, clock;
56071a20 1520 int min_lane_count = 1;
eeb6324d 1521 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1522 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1523 int min_clock = 0;
a8f3ef61 1524 int max_clock;
083f9560 1525 int bpp, mode_rate;
ff9a6750 1526 int link_avail, link_clock;
94ca719e
VS
1527 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1528 int common_len;
04a60f9f 1529 uint8_t link_bw, rate_select;
a8f3ef61 1530
94ca719e 1531 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1532
1533 /* No common link rates between source and sink */
94ca719e 1534 WARN_ON(common_len <= 0);
a8f3ef61 1535
94ca719e 1536 max_clock = common_len - 1;
a4fc5ed6 1537
bc7d38a4 1538 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1539 pipe_config->has_pch_encoder = true;
1540
03afc4a2 1541 pipe_config->has_dp_encoder = true;
f769cd24 1542 pipe_config->has_drrs = false;
9fcb1704 1543 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1544
dd06f90e
JN
1545 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1546 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1547 adjusted_mode);
a1b2278e
CK
1548
1549 if (INTEL_INFO(dev)->gen >= 9) {
1550 int ret;
e435d6e5 1551 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1552 if (ret)
1553 return ret;
1554 }
1555
b5667627 1556 if (HAS_GMCH_DISPLAY(dev))
2dd24552
JB
1557 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1558 intel_connector->panel.fitting_mode);
1559 else
b074cec8
JB
1560 intel_pch_panel_fitting(intel_crtc, pipe_config,
1561 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1562 }
1563
cb1793ce 1564 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1565 return false;
1566
083f9560 1567 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1568 "max bw %d pixel clock %iKHz\n",
94ca719e 1569 max_lane_count, common_rates[max_clock],
241bfc38 1570 adjusted_mode->crtc_clock);
083f9560 1571
36008365
DV
1572 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1573 * bpc in between. */
3e7ca985 1574 bpp = pipe_config->pipe_bpp;
56071a20 1575 if (is_edp(intel_dp)) {
22ce5628
TS
1576
		/* Get bpp from vbt only for panels that don't have bpp in edid */
1578 if (intel_connector->base.display_info.bpc == 0 &&
1579 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1580 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1581 dev_priv->vbt.edp_bpp);
1582 bpp = dev_priv->vbt.edp_bpp;
1583 }
1584
344c5bbc
JN
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
1592 min_lane_count = max_lane_count;
1593 min_clock = max_clock;
7984211e 1594 }
657445fe 1595
36008365 1596 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1597 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1598 bpp);
36008365 1599
c6930992 1600 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1601 for (lane_count = min_lane_count;
1602 lane_count <= max_lane_count;
1603 lane_count <<= 1) {
1604
94ca719e 1605 link_clock = common_rates[clock];
36008365
DV
1606 link_avail = intel_dp_max_data_rate(link_clock,
1607 lane_count);
1608
1609 if (mode_rate <= link_avail) {
1610 goto found;
1611 }
1612 }
1613 }
1614 }
c4867936 1615
36008365 1616 return false;
3685a8f3 1617
36008365 1618found:
55bc60db
VS
1619 if (intel_dp->color_range_auto) {
1620 /*
1621 * See:
1622 * CEA-861-E - 5.1 Default Encoding Parameters
1623 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1624 */
0f2a2a75
VS
1625 pipe_config->limited_color_range =
1626 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1627 } else {
1628 pipe_config->limited_color_range =
1629 intel_dp->limited_color_range;
55bc60db
VS
1630 }
1631
90a6b7b0 1632 pipe_config->lane_count = lane_count;
a8f3ef61 1633
657445fe 1634 pipe_config->pipe_bpp = bpp;
94ca719e 1635 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1636
04a60f9f
VS
1637 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1638 &link_bw, &rate_select);
1639
1640 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1641 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1642 pipe_config->port_clock, bpp);
36008365
DV
1643 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1644 mode_rate, link_avail);
a4fc5ed6 1645
03afc4a2 1646 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1647 adjusted_mode->crtc_clock,
1648 pipe_config->port_clock,
03afc4a2 1649 &pipe_config->dp_m_n);
9d1a455b 1650
439d7ac0 1651 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1652 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1653 pipe_config->has_drrs = true;
439d7ac0
PB
1654 intel_link_compute_m_n(bpp, lane_count,
1655 intel_connector->panel.downclock_mode->clock,
1656 pipe_config->port_clock,
1657 &pipe_config->dp_m2_n2);
1658 }
1659
ef11bdb3 1660 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1661 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1662 else if (IS_BROXTON(dev))
1663 /* handled in ddi */;
5416d871 1664 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1665 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1666 else
840b32b7 1667 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1668
03afc4a2 1669 return true;
a4fc5ed6
KP
1670}
1671
901c2daf
VS
1672void intel_dp_set_link_params(struct intel_dp *intel_dp,
1673 const struct intel_crtc_state *pipe_config)
1674{
1675 intel_dp->link_rate = pipe_config->port_clock;
1676 intel_dp->lane_count = pipe_config->lane_count;
1677}
1678
8ac33ed3 1679static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1680{
b934223d 1681 struct drm_device *dev = encoder->base.dev;
417e822d 1682 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1683 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1684 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1685 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
7c5f93b0 1686 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1687
901c2daf
VS
1688 intel_dp_set_link_params(intel_dp, crtc->config);
1689
417e822d 1690 /*
1a2eb460 1691 * There are four kinds of DP registers:
417e822d
KP
1692 *
1693 * IBX PCH
1a2eb460
KP
1694 * SNB CPU
1695 * IVB CPU
417e822d
KP
1696 * CPT PCH
1697 *
1698 * IBX PCH and CPU are the same for almost everything,
1699 * except that the CPU DP PLL is configured in this
1700 * register.
1701 *
1702 * CPT PCH is quite different, having many bits moved
1703 * to the TRANS_DP_CTL register instead. That
1704 * configuration happens (oddly) in ironlake_pch_enable().
1705 */
9c9e7927 1706
417e822d
KP
1707 /* Preserve the BIOS-computed detected bit. This is
1708 * supposed to be read-only.
1709 */
1710 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1711
417e822d 1712 /* Handle DP bits in common between all three register formats */
417e822d 1713 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1714 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1715
417e822d 1716 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1717
39e5fa88 1718 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1719 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1720 intel_dp->DP |= DP_SYNC_HS_HIGH;
1721 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1722 intel_dp->DP |= DP_SYNC_VS_HIGH;
1723 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1724
6aba5b6c 1725 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1726 intel_dp->DP |= DP_ENHANCED_FRAMING;
1727
7c62a164 1728 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1729 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1730 u32 trans_dp;
1731
39e5fa88 1732 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1733
1734 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1735 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1736 trans_dp |= TRANS_DP_ENH_FRAMING;
1737 else
1738 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1739 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1740 } else {
0f2a2a75 1741 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 1742 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
0f2a2a75 1743 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1744
1745 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1746 intel_dp->DP |= DP_SYNC_HS_HIGH;
1747 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1748 intel_dp->DP |= DP_SYNC_VS_HIGH;
1749 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1750
6aba5b6c 1751 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1752 intel_dp->DP |= DP_ENHANCED_FRAMING;
1753
39e5fa88 1754 if (IS_CHERRYVIEW(dev))
44f37d1f 1755 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1756 else if (crtc->pipe == PIPE_B)
1757 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1758 }
a4fc5ed6
KP
1759}
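/*
 * Illustrative sketch, not part of intel_dp.c: the three register layouts
 * that intel_dp_prepare() above distinguishes, reduced to a selector.  The
 * enum and function names are made up for illustration; the conditions
 * mirror the if/else ladder in the function.
 */
enum dp_reg_layout {
	DP_REG_GEN7_CPU_EDP,	/* gen7 port A: CPU eDP, CPT-style training bits */
	DP_REG_PCH_CPT,		/* CPT/PPT PCH port: sync/framing live in TRANS_DP_CTL */
	DP_REG_G4X,		/* everything else: original g4x/VLV/CHV layout */
};

static enum dp_reg_layout dp_reg_layout_for_port(int is_gen7, int has_pch_cpt,
						 int is_port_a)
{
	if (is_gen7 && is_port_a)
		return DP_REG_GEN7_CPU_EDP;
	if (has_pch_cpt && !is_port_a)
		return DP_REG_PCH_CPT;
	return DP_REG_G4X;
}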
1760
ffd6749d
PZ
1761#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1762#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1763
1a5ef5b7
PZ
1764#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1765#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1766
ffd6749d
PZ
1767#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1768#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1769
4be73780 1770static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1771 u32 mask,
1772 u32 value)
bd943159 1773{
30add22d 1774 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1775 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1776 i915_reg_t pp_stat_reg, pp_ctrl_reg;
453c5420 1777
e39b999a
VS
1778 lockdep_assert_held(&dev_priv->pps_mutex);
1779
bf13e81b
JN
1780 pp_stat_reg = _pp_stat_reg(intel_dp);
1781 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1782
99ea7127 1783 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1784 mask, value,
1785 I915_READ(pp_stat_reg),
1786 I915_READ(pp_ctrl_reg));
32ce697c 1787
3f177625
TU
1788 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
1789 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
99ea7127 1790 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1791 I915_READ(pp_stat_reg),
1792 I915_READ(pp_ctrl_reg));
54c136d4
CW
1793
1794 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1795}
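/*
 * Illustrative sketch, not part of intel_dp.c: the poll-until-match pattern
 * that _wait_for() provides for wait_panel_status() above, spelled out in
 * plain C.  read_status() is a hypothetical stand-in for reading PP_STATUS;
 * the driver polls with a 5 second timeout and roughly a 10 ms interval.
 */
#include <unistd.h>

static int poll_status(unsigned int (*read_status)(void),
		       unsigned int mask, unsigned int value,
		       unsigned int timeout_ms, unsigned int interval_ms)
{
	unsigned int waited_ms = 0;

	while ((read_status() & mask) != value) {
		if (waited_ms >= timeout_ms)
			return 0;	/* caller logs "Panel status timeout" */
		usleep(interval_ms * 1000);
		waited_ms += interval_ms;
	}

	return 1;
}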
32ce697c 1796
4be73780 1797static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1798{
1799 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1800 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1801}
1802
4be73780 1803static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1804{
1805 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1806 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1807}
1808
4be73780 1809static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127 1810{
d28d4731
AK
1811 ktime_t panel_power_on_time;
1812 s64 panel_power_off_duration;
1813
99ea7127 1814 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c 1815
d28d4731
AK
1816 /* take the difference of the current time and the panel power off time
1817 * and then make the panel wait for t11_t12 if needed. */
1818 panel_power_on_time = ktime_get_boottime();
1819 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1820
dce56b3c
PZ
1821 /* When we disable the VDD override bit last, we have to do the manual
1822 * wait ourselves. */
d28d4731
AK
1823 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1824 wait_remaining_ms_from_jiffies(jiffies,
1825 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
dce56b3c 1826
4be73780 1827 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1828}
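/*
 * Illustrative sketch, not part of intel_dp.c: the arithmetic behind the
 * T11/T12 handling above.  Rather than always sleeping the full power cycle
 * delay, only the portion that has not already elapsed since the panel was
 * powered off needs to be waited.  The millisecond parameters stand in for
 * the driver's ktime_t bookkeeping.
 */
static unsigned int panel_power_cycle_wait_ms(long long now_ms,
					      long long power_off_ms,
					      unsigned int cycle_delay_ms)
{
	long long off_duration_ms = now_ms - power_off_ms;

	if (off_duration_ms >= cycle_delay_ms)
		return 0;	/* panel has already been off long enough */

	return cycle_delay_ms - (unsigned int)off_duration_ms;
}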
1829
4be73780 1830static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1831{
1832 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1833 intel_dp->backlight_on_delay);
1834}
1835
4be73780 1836static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1837{
1838 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1839 intel_dp->backlight_off_delay);
1840}
99ea7127 1841
832dd3c1
KP
1842/* Read the current pp_control value, unlocking the register if it
1843 * is locked
1844 */
1845
453c5420 1846static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1847{
453c5420
JB
1848 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1849 struct drm_i915_private *dev_priv = dev->dev_private;
1850 u32 control;
832dd3c1 1851
e39b999a
VS
1852 lockdep_assert_held(&dev_priv->pps_mutex);
1853
bf13e81b 1854 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1855 if (!IS_BROXTON(dev)) {
1856 control &= ~PANEL_UNLOCK_MASK;
1857 control |= PANEL_UNLOCK_REGS;
1858 }
832dd3c1 1859 return control;
bd943159
KP
1860}
1861
951468f3
VS
1862/*
1863 * Must be paired with edp_panel_vdd_off().
1864 * Must hold pps_mutex around the whole on/off sequence.
1865 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1866 */
1e0560e0 1867static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1868{
30add22d 1869 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1870 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1871 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1872 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1873 enum intel_display_power_domain power_domain;
5d613501 1874 u32 pp;
f0f59a00 1875 i915_reg_t pp_stat_reg, pp_ctrl_reg;
adddaaf4 1876 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1877
e39b999a
VS
1878 lockdep_assert_held(&dev_priv->pps_mutex);
1879
97af61f5 1880 if (!is_edp(intel_dp))
adddaaf4 1881 return false;
bd943159 1882
2c623c11 1883 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1884 intel_dp->want_panel_vdd = true;
99ea7127 1885
4be73780 1886 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1887 return need_to_disable;
b0665d57 1888
25f78f58 1889 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 1890 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1891
3936fcf4
VS
1892 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1893 port_name(intel_dig_port->port));
bd943159 1894
4be73780
DV
1895 if (!edp_have_panel_power(intel_dp))
1896 wait_panel_power_cycle(intel_dp);
99ea7127 1897
453c5420 1898 pp = ironlake_get_pp_control(intel_dp);
5d613501 1899 pp |= EDP_FORCE_VDD;
ebf33b18 1900
bf13e81b
JN
1901 pp_stat_reg = _pp_stat_reg(intel_dp);
1902 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1903
1904 I915_WRITE(pp_ctrl_reg, pp);
1905 POSTING_READ(pp_ctrl_reg);
1906 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1907 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1908 /*
1909 * If the panel wasn't on, delay before accessing aux channel
1910 */
4be73780 1911 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1912 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1913 port_name(intel_dig_port->port));
f01eca2e 1914 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1915 }
adddaaf4
JN
1916
1917 return need_to_disable;
1918}
1919
951468f3
VS
1920/*
1921 * Must be paired with intel_edp_panel_vdd_off() or
1922 * intel_edp_panel_off().
1923 * Nested calls to these functions are not allowed since
1924 * we drop the lock. Caller must use some higher level
1925 * locking to prevent nested calls from other threads.
1926 */
b80d6c78 1927void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1928{
c695b6b6 1929 bool vdd;
adddaaf4 1930
c695b6b6
VS
1931 if (!is_edp(intel_dp))
1932 return;
1933
773538e8 1934 pps_lock(intel_dp);
c695b6b6 1935 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1936 pps_unlock(intel_dp);
c695b6b6 1937
e2c719b7 1938 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1939 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1940}
1941
4be73780 1942static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1943{
30add22d 1944 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1945 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1946 struct intel_digital_port *intel_dig_port =
1947 dp_to_dig_port(intel_dp);
1948 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1949 enum intel_display_power_domain power_domain;
5d613501 1950 u32 pp;
f0f59a00 1951 i915_reg_t pp_stat_reg, pp_ctrl_reg;
5d613501 1952
e39b999a 1953 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1954
15e899a0 1955 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1956
15e899a0 1957 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1958 return;
b0665d57 1959
3936fcf4
VS
1960 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1961 port_name(intel_dig_port->port));
bd943159 1962
be2c9196
VS
1963 pp = ironlake_get_pp_control(intel_dp);
1964 pp &= ~EDP_FORCE_VDD;
453c5420 1965
be2c9196
VS
1966 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1967 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1968
be2c9196
VS
1969 I915_WRITE(pp_ctrl_reg, pp);
1970 POSTING_READ(pp_ctrl_reg);
90791a5c 1971
be2c9196
VS
1972 /* Make sure sequencer is idle before allowing subsequent activity */
1973 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1974 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1975
be2c9196 1976 if ((pp & POWER_TARGET_ON) == 0)
d28d4731 1977 intel_dp->panel_power_off_time = ktime_get_boottime();
e9cb81a2 1978
25f78f58 1979 power_domain = intel_display_port_aux_power_domain(intel_encoder);
be2c9196 1980 intel_display_power_put(dev_priv, power_domain);
bd943159 1981}
5d613501 1982
4be73780 1983static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1984{
1985 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1986 struct intel_dp, panel_vdd_work);
bd943159 1987
773538e8 1988 pps_lock(intel_dp);
15e899a0
VS
1989 if (!intel_dp->want_panel_vdd)
1990 edp_panel_vdd_off_sync(intel_dp);
773538e8 1991 pps_unlock(intel_dp);
bd943159
KP
1992}
1993
aba86890
ID
1994static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1995{
1996 unsigned long delay;
1997
1998 /*
1999 * Queue the timer to fire a long time from now (relative to the power
2000 * down delay) to keep the panel power up across a sequence of
2001 * operations.
2002 */
2003 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2004 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2005}
2006
951468f3
VS
2007/*
2008 * Must be paired with edp_panel_vdd_on().
2009 * Must hold pps_mutex around the whole on/off sequence.
2010 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2011 */
4be73780 2012static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 2013{
e39b999a
VS
2014 struct drm_i915_private *dev_priv =
2015 intel_dp_to_dev(intel_dp)->dev_private;
2016
2017 lockdep_assert_held(&dev_priv->pps_mutex);
2018
97af61f5
KP
2019 if (!is_edp(intel_dp))
2020 return;
5d613501 2021
e2c719b7 2022 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 2023 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 2024
bd943159
KP
2025 intel_dp->want_panel_vdd = false;
2026
aba86890 2027 if (sync)
4be73780 2028 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
2029 else
2030 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
2031}
2032
9f0fb5be 2033static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 2034{
30add22d 2035 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2036 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 2037 u32 pp;
f0f59a00 2038 i915_reg_t pp_ctrl_reg;
9934c132 2039
9f0fb5be
VS
2040 lockdep_assert_held(&dev_priv->pps_mutex);
2041
97af61f5 2042 if (!is_edp(intel_dp))
bd943159 2043 return;
99ea7127 2044
3936fcf4
VS
2045 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2046 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 2047
e7a89ace
VS
2048 if (WARN(edp_have_panel_power(intel_dp),
2049 "eDP port %c panel power already on\n",
2050 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 2051 return;
9934c132 2052
4be73780 2053 wait_panel_power_cycle(intel_dp);
37c6c9b0 2054
bf13e81b 2055 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2056 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
2057 if (IS_GEN5(dev)) {
2058 /* ILK workaround: disable reset around power sequence */
2059 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
2060 I915_WRITE(pp_ctrl_reg, pp);
2061 POSTING_READ(pp_ctrl_reg);
05ce1a49 2062 }
37c6c9b0 2063
1c0ae80a 2064 pp |= POWER_TARGET_ON;
99ea7127
KP
2065 if (!IS_GEN5(dev))
2066 pp |= PANEL_POWER_RESET;
2067
453c5420
JB
2068 I915_WRITE(pp_ctrl_reg, pp);
2069 POSTING_READ(pp_ctrl_reg);
9934c132 2070
4be73780 2071 wait_panel_on(intel_dp);
dce56b3c 2072 intel_dp->last_power_on = jiffies;
9934c132 2073
05ce1a49
KP
2074 if (IS_GEN5(dev)) {
2075 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
2076 I915_WRITE(pp_ctrl_reg, pp);
2077 POSTING_READ(pp_ctrl_reg);
05ce1a49 2078 }
9f0fb5be 2079}
e39b999a 2080
9f0fb5be
VS
2081void intel_edp_panel_on(struct intel_dp *intel_dp)
2082{
2083 if (!is_edp(intel_dp))
2084 return;
2085
2086 pps_lock(intel_dp);
2087 edp_panel_on(intel_dp);
773538e8 2088 pps_unlock(intel_dp);
9934c132
JB
2089}
2090
9f0fb5be
VS
2091
2092static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2093{
4e6e1a54
ID
2094 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2095 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2096 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2097 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2098 enum intel_display_power_domain power_domain;
99ea7127 2099 u32 pp;
f0f59a00 2100 i915_reg_t pp_ctrl_reg;
9934c132 2101
9f0fb5be
VS
2102 lockdep_assert_held(&dev_priv->pps_mutex);
2103
97af61f5
KP
2104 if (!is_edp(intel_dp))
2105 return;
37c6c9b0 2106
3936fcf4
VS
2107 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2108 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2109
3936fcf4
VS
2110 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2111 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2112
453c5420 2113 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2114 /* We need to switch off panel power _and_ force vdd; otherwise some
2115 * panels get very unhappy and cease to work. */
b3064154
PJ
2116 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2117 EDP_BLC_ENABLE);
453c5420 2118
bf13e81b 2119 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2120
849e39f5
PZ
2121 intel_dp->want_panel_vdd = false;
2122
453c5420
JB
2123 I915_WRITE(pp_ctrl_reg, pp);
2124 POSTING_READ(pp_ctrl_reg);
9934c132 2125
d28d4731 2126 intel_dp->panel_power_off_time = ktime_get_boottime();
4be73780 2127 wait_panel_off(intel_dp);
849e39f5
PZ
2128
2129 /* We got a reference when we enabled the VDD. */
25f78f58 2130 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 2131 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2132}
e39b999a 2133
9f0fb5be
VS
2134void intel_edp_panel_off(struct intel_dp *intel_dp)
2135{
2136 if (!is_edp(intel_dp))
2137 return;
e39b999a 2138
9f0fb5be
VS
2139 pps_lock(intel_dp);
2140 edp_panel_off(intel_dp);
773538e8 2141 pps_unlock(intel_dp);
9934c132
JB
2142}
2143
1250d107
JN
2144/* Enable backlight in the panel power control. */
2145static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2146{
da63a9f2
PZ
2147 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2148 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2149 struct drm_i915_private *dev_priv = dev->dev_private;
2150 u32 pp;
f0f59a00 2151 i915_reg_t pp_ctrl_reg;
32f9d658 2152
01cb9ea6
JB
2153 /*
2154 * If we enable the backlight right away following a panel power
2155 * on, we may see slight flicker as the panel syncs with the eDP
2156 * link. So delay a bit to make sure the image is solid before
2157 * allowing it to appear.
2158 */
4be73780 2159 wait_backlight_on(intel_dp);
e39b999a 2160
773538e8 2161 pps_lock(intel_dp);
e39b999a 2162
453c5420 2163 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2164 pp |= EDP_BLC_ENABLE;
453c5420 2165
bf13e81b 2166 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2167
2168 I915_WRITE(pp_ctrl_reg, pp);
2169 POSTING_READ(pp_ctrl_reg);
e39b999a 2170
773538e8 2171 pps_unlock(intel_dp);
32f9d658
ZW
2172}
2173
1250d107
JN
2174/* Enable backlight PWM and backlight PP control. */
2175void intel_edp_backlight_on(struct intel_dp *intel_dp)
2176{
2177 if (!is_edp(intel_dp))
2178 return;
2179
2180 DRM_DEBUG_KMS("\n");
2181
2182 intel_panel_enable_backlight(intel_dp->attached_connector);
2183 _intel_edp_backlight_on(intel_dp);
2184}
2185
2186/* Disable backlight in the panel power control. */
2187static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2188{
30add22d 2189 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2190 struct drm_i915_private *dev_priv = dev->dev_private;
2191 u32 pp;
f0f59a00 2192 i915_reg_t pp_ctrl_reg;
32f9d658 2193
f01eca2e
KP
2194 if (!is_edp(intel_dp))
2195 return;
2196
773538e8 2197 pps_lock(intel_dp);
e39b999a 2198
453c5420 2199 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2200 pp &= ~EDP_BLC_ENABLE;
453c5420 2201
bf13e81b 2202 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2203
2204 I915_WRITE(pp_ctrl_reg, pp);
2205 POSTING_READ(pp_ctrl_reg);
f7d2323c 2206
773538e8 2207 pps_unlock(intel_dp);
e39b999a
VS
2208
2209 intel_dp->last_backlight_off = jiffies;
f7d2323c 2210 edp_wait_backlight_off(intel_dp);
1250d107 2211}
f7d2323c 2212
1250d107
JN
2213/* Disable backlight PP control and backlight PWM. */
2214void intel_edp_backlight_off(struct intel_dp *intel_dp)
2215{
2216 if (!is_edp(intel_dp))
2217 return;
2218
2219 DRM_DEBUG_KMS("\n");
f7d2323c 2220
1250d107 2221 _intel_edp_backlight_off(intel_dp);
f7d2323c 2222 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2223}
a4fc5ed6 2224
73580fb7
JN
2225/*
2226 * Hook for controlling the panel power control backlight through the bl_power
2227 * sysfs attribute. Take care to handle multiple calls.
2228 */
2229static void intel_edp_backlight_power(struct intel_connector *connector,
2230 bool enable)
2231{
2232 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2233 bool is_enabled;
2234
773538e8 2235 pps_lock(intel_dp);
e39b999a 2236 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2237 pps_unlock(intel_dp);
73580fb7
JN
2238
2239 if (is_enabled == enable)
2240 return;
2241
23ba9373
JN
2242 DRM_DEBUG_KMS("panel power control backlight %s\n",
2243 enable ? "enable" : "disable");
73580fb7
JN
2244
2245 if (enable)
2246 _intel_edp_backlight_on(intel_dp);
2247 else
2248 _intel_edp_backlight_off(intel_dp);
2249}
2250
64e1077a
VS
2251static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2252{
2253 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2254 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2255 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2256
2257 I915_STATE_WARN(cur_state != state,
2258 "DP port %c state assertion failure (expected %s, current %s)\n",
2259 port_name(dig_port->port),
87ad3212 2260 onoff(state), onoff(cur_state));
64e1077a
VS
2261}
2262#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2263
2264static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2265{
2266 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2267
2268 I915_STATE_WARN(cur_state != state,
2269 "eDP PLL state assertion failure (expected %s, current %s)\n",
87ad3212 2270 onoff(state), onoff(cur_state));
64e1077a
VS
2271}
2272#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2273#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2274
2bd2ad64 2275static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2276{
da63a9f2 2277 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2278 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2279 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2280
64e1077a
VS
2281 assert_pipe_disabled(dev_priv, crtc->pipe);
2282 assert_dp_port_disabled(intel_dp);
2283 assert_edp_pll_disabled(dev_priv);
2bd2ad64 2284
abfce949
VS
2285 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2286 crtc->config->port_clock);
2287
2288 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2289
2290 if (crtc->config->port_clock == 162000)
2291 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2292 else
2293 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2294
2295 I915_WRITE(DP_A, intel_dp->DP);
2296 POSTING_READ(DP_A);
2297 udelay(500);
2298
0767935e 2299 intel_dp->DP |= DP_PLL_ENABLE;
6fec7662 2300
0767935e 2301 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2302 POSTING_READ(DP_A);
2303 udelay(200);
d240f20f
JB
2304}
2305
2bd2ad64 2306static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2307{
da63a9f2 2308 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2309 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2310 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2311
64e1077a
VS
2312 assert_pipe_disabled(dev_priv, crtc->pipe);
2313 assert_dp_port_disabled(intel_dp);
2314 assert_edp_pll_enabled(dev_priv);
2bd2ad64 2315
abfce949
VS
2316 DRM_DEBUG_KMS("disabling eDP PLL\n");
2317
6fec7662 2318 intel_dp->DP &= ~DP_PLL_ENABLE;
0767935e 2319
6fec7662 2320 I915_WRITE(DP_A, intel_dp->DP);
1af5fa1b 2321 POSTING_READ(DP_A);
d240f20f
JB
2322 udelay(200);
2323}
2324
c7ad3810 2325/* If the sink supports it, try to set the power state appropriately */
c19b0669 2326void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2327{
2328 int ret, i;
2329
2330 /* Should have a valid DPCD by this point */
2331 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2332 return;
2333
2334 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2335 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2336 DP_SET_POWER_D3);
c7ad3810
JB
2337 } else {
2338 /*
2339 * When turning on, we need to retry for 1ms to give the sink
2340 * time to wake up.
2341 */
2342 for (i = 0; i < 3; i++) {
9d1a1031
JN
2343 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2344 DP_SET_POWER_D0);
c7ad3810
JB
2345 if (ret == 1)
2346 break;
2347 msleep(1);
2348 }
2349 }
f9cac721
JN
2350
2351 if (ret != 1)
2352 DRM_DEBUG_KMS("failed to %s sink power state\n",
2353 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2354}
2355
19d8fe15
DV
2356static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2357 enum pipe *pipe)
d240f20f 2358{
19d8fe15 2359 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2360 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2361 struct drm_device *dev = encoder->base.dev;
2362 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2363 enum intel_display_power_domain power_domain;
2364 u32 tmp;
6fa9a5ec 2365 bool ret;
6d129bea
ID
2366
2367 power_domain = intel_display_port_power_domain(encoder);
6fa9a5ec 2368 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
6d129bea
ID
2369 return false;
2370
6fa9a5ec
ID
2371 ret = false;
2372
6d129bea 2373 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2374
2375 if (!(tmp & DP_PORT_EN))
6fa9a5ec 2376 goto out;
19d8fe15 2377
39e5fa88 2378 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2379 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2380 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2381 enum pipe p;
19d8fe15 2382
adc289d7
VS
2383 for_each_pipe(dev_priv, p) {
2384 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2385 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2386 *pipe = p;
6fa9a5ec
ID
2387 ret = true;
2388
2389 goto out;
19d8fe15
DV
2390 }
2391 }
19d8fe15 2392
4a0833ec 2393 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
f0f59a00 2394 i915_mmio_reg_offset(intel_dp->output_reg));
39e5fa88
VS
2395 } else if (IS_CHERRYVIEW(dev)) {
2396 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2397 } else {
2398 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2399 }
d240f20f 2400
6fa9a5ec
ID
2401 ret = true;
2402
2403out:
2404 intel_display_power_put(dev_priv, power_domain);
2405
2406 return ret;
19d8fe15 2407}
d240f20f 2408
045ac3b5 2409static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2410 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2411{
2412 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2413 u32 tmp, flags = 0;
63000ef6
XZ
2414 struct drm_device *dev = encoder->base.dev;
2415 struct drm_i915_private *dev_priv = dev->dev_private;
2416 enum port port = dp_to_dig_port(intel_dp)->port;
2417 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
045ac3b5 2418
9ed109a7 2419 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2420
2421 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2422
39e5fa88 2423 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2424 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2425
2426 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2427 flags |= DRM_MODE_FLAG_PHSYNC;
2428 else
2429 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2430
b81e34c2 2431 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2432 flags |= DRM_MODE_FLAG_PVSYNC;
2433 else
2434 flags |= DRM_MODE_FLAG_NVSYNC;
2435 } else {
39e5fa88 2436 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2437 flags |= DRM_MODE_FLAG_PHSYNC;
2438 else
2439 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2440
39e5fa88 2441 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2442 flags |= DRM_MODE_FLAG_PVSYNC;
2443 else
2444 flags |= DRM_MODE_FLAG_NVSYNC;
2445 }
045ac3b5 2446
2d112de7 2447 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2448
8c875fca 2449 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 2450 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
8c875fca
VS
2451 pipe_config->limited_color_range = true;
2452
eb14cb74
VS
2453 pipe_config->has_dp_encoder = true;
2454
90a6b7b0
VS
2455 pipe_config->lane_count =
2456 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2457
eb14cb74
VS
2458 intel_dp_get_m_n(crtc, pipe_config);
2459
18442d08 2460 if (port == PORT_A) {
b377e0df 2461 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
2462 pipe_config->port_clock = 162000;
2463 else
2464 pipe_config->port_clock = 270000;
2465 }
18442d08 2466
e3b247da
VS
2467 pipe_config->base.adjusted_mode.crtc_clock =
2468 intel_dotclock_calculate(pipe_config->port_clock,
2469 &pipe_config->dp_m_n);
7f16e5c1 2470
c6cd2ee2
JN
2471 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2472 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2473 /*
2474 * This is a big fat ugly hack.
2475 *
2476 * Some machines in UEFI boot mode provide us a VBT that has 18
2477 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2478 * unknown we fail to light up. Yet the same BIOS boots up with
2479 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2480 * max, not what it tells us to use.
2481 *
2482 * Note: This will still be broken if the eDP panel is not lit
2483 * up by the BIOS, and thus we can't get the mode at module
2484 * load.
2485 */
2486 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2487 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2488 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2489 }
045ac3b5
JB
2490}
2491
e8cb4558 2492static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2493{
e8cb4558 2494 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2495 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2496 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2497
6e3c9717 2498 if (crtc->config->has_audio)
495a5bb8 2499 intel_audio_codec_disable(encoder);
6cb49835 2500
b32c6f48
RV
2501 if (HAS_PSR(dev) && !HAS_DDI(dev))
2502 intel_psr_disable(intel_dp);
2503
6cb49835
DV
2504 /* Make sure the panel is off before trying to change the mode. But also
2505 * ensure that we have vdd while we switch off the panel. */
24f3e092 2506 intel_edp_panel_vdd_on(intel_dp);
4be73780 2507 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2508 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2509 intel_edp_panel_off(intel_dp);
3739850b 2510
08aff3fe
VS
2511 /* disable the port before the pipe on g4x */
2512 if (INTEL_INFO(dev)->gen < 5)
3739850b 2513 intel_dp_link_down(intel_dp);
d240f20f
JB
2514}
2515
08aff3fe 2516static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2517{
2bd2ad64 2518 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2519 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2520
49277c31 2521 intel_dp_link_down(intel_dp);
abfce949
VS
2522
2523 /* Only ilk+ has port A */
08aff3fe
VS
2524 if (port == PORT_A)
2525 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2526}
2527
2528static void vlv_post_disable_dp(struct intel_encoder *encoder)
2529{
2530 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2531
2532 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2533}
2534
a8f327fb
VS
2535static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2536 bool reset)
580d3811 2537{
a8f327fb
VS
2538 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2539 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2540 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2541 enum pipe pipe = crtc->pipe;
2542 uint32_t val;
580d3811 2543
a8f327fb
VS
2544 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2545 if (reset)
2546 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2547 else
2548 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2549 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2550
a8f327fb
VS
2551 if (crtc->config->lane_count > 2) {
2552 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2553 if (reset)
2554 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2555 else
2556 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2557 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2558 }
580d3811 2559
97fd4d5c 2560 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2561 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2562 if (reset)
2563 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2564 else
2565 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2566 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2567
a8f327fb 2568 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2569 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2570 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2571 if (reset)
2572 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2573 else
2574 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2575 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2576 }
a8f327fb 2577}
97fd4d5c 2578
a8f327fb
VS
2579static void chv_post_disable_dp(struct intel_encoder *encoder)
2580{
2581 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2582 struct drm_device *dev = encoder->base.dev;
2583 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2584
a8f327fb
VS
2585 intel_dp_link_down(intel_dp);
2586
2587 mutex_lock(&dev_priv->sb_lock);
2588
2589 /* Assert data lane reset */
2590 chv_data_lane_soft_reset(encoder, true);
580d3811 2591
a580516d 2592 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2593}
2594
7b13b58a
VS
2595static void
2596_intel_dp_set_link_train(struct intel_dp *intel_dp,
2597 uint32_t *DP,
2598 uint8_t dp_train_pat)
2599{
2600 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2601 struct drm_device *dev = intel_dig_port->base.base.dev;
2602 struct drm_i915_private *dev_priv = dev->dev_private;
2603 enum port port = intel_dig_port->port;
2604
2605 if (HAS_DDI(dev)) {
2606 uint32_t temp = I915_READ(DP_TP_CTL(port));
2607
2608 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2609 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2610 else
2611 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2612
2613 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2614 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2615 case DP_TRAINING_PATTERN_DISABLE:
2616 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2617
2618 break;
2619 case DP_TRAINING_PATTERN_1:
2620 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2621 break;
2622 case DP_TRAINING_PATTERN_2:
2623 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2624 break;
2625 case DP_TRAINING_PATTERN_3:
2626 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2627 break;
2628 }
2629 I915_WRITE(DP_TP_CTL(port), temp);
2630
39e5fa88
VS
2631 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2632 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2633 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2634
2635 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2636 case DP_TRAINING_PATTERN_DISABLE:
2637 *DP |= DP_LINK_TRAIN_OFF_CPT;
2638 break;
2639 case DP_TRAINING_PATTERN_1:
2640 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2641 break;
2642 case DP_TRAINING_PATTERN_2:
2643 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2644 break;
2645 case DP_TRAINING_PATTERN_3:
2646 DRM_ERROR("DP training pattern 3 not supported\n");
2647 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2648 break;
2649 }
2650
2651 } else {
2652 if (IS_CHERRYVIEW(dev))
2653 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2654 else
2655 *DP &= ~DP_LINK_TRAIN_MASK;
2656
2657 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2658 case DP_TRAINING_PATTERN_DISABLE:
2659 *DP |= DP_LINK_TRAIN_OFF;
2660 break;
2661 case DP_TRAINING_PATTERN_1:
2662 *DP |= DP_LINK_TRAIN_PAT_1;
2663 break;
2664 case DP_TRAINING_PATTERN_2:
2665 *DP |= DP_LINK_TRAIN_PAT_2;
2666 break;
2667 case DP_TRAINING_PATTERN_3:
2668 if (IS_CHERRYVIEW(dev)) {
2669 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2670 } else {
2671 DRM_ERROR("DP training pattern 3 not supported\n");
2672 *DP |= DP_LINK_TRAIN_PAT_2;
2673 }
2674 break;
2675 }
2676 }
2677}
2678
2679static void intel_dp_enable_port(struct intel_dp *intel_dp)
2680{
2681 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2682 struct drm_i915_private *dev_priv = dev->dev_private;
6fec7662
VS
2683 struct intel_crtc *crtc =
2684 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
7b13b58a 2685
7b13b58a
VS
2686 /* enable with pattern 1 (as per spec) */
2687 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2688 DP_TRAINING_PATTERN_1);
2689
2690 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2691 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2692
2693 /*
2694 * Magic for VLV/CHV. We _must_ first set up the register
2695 * without actually enabling the port, and then do another
2696 * write to enable the port. Otherwise link training will
2697 * fail when the power sequencer is freshly used for this port.
2698 */
2699 intel_dp->DP |= DP_PORT_EN;
6fec7662
VS
2700 if (crtc->config->has_audio)
2701 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50
VS
2702
2703 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2704 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2705}
2706
e8cb4558 2707static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2708{
e8cb4558
DV
2709 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2710 struct drm_device *dev = encoder->base.dev;
2711 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2712 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2713 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
d6fbdd15
VS
2714 enum port port = dp_to_dig_port(intel_dp)->port;
2715 enum pipe pipe = crtc->pipe;
5d613501 2716
0c33d8d7
DV
2717 if (WARN_ON(dp_reg & DP_PORT_EN))
2718 return;
5d613501 2719
093e3f13
VS
2720 pps_lock(intel_dp);
2721
666a4537 2722 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
093e3f13
VS
2723 vlv_init_panel_power_sequencer(intel_dp);
2724
7864578a
VS
2725 /*
2726 * We get an occasional spurious underrun between the port
2727 * enable and vdd enable, when enabling port A eDP.
2728 *
2729 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2730 */
2731 if (port == PORT_A)
2732 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2733
7b13b58a 2734 intel_dp_enable_port(intel_dp);
093e3f13 2735
d6fbdd15
VS
2736 if (port == PORT_A && IS_GEN5(dev_priv)) {
2737 /*
2738 * Underrun reporting for the other pipe was disabled in
2739 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2740 * enabled, so it's now safe to re-enable underrun reporting.
2741 */
2742 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2743 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2744 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2745 }
2746
093e3f13
VS
2747 edp_panel_vdd_on(intel_dp);
2748 edp_panel_on(intel_dp);
2749 edp_panel_vdd_off(intel_dp, true);
2750
7864578a
VS
2751 if (port == PORT_A)
2752 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2753
093e3f13
VS
2754 pps_unlock(intel_dp);
2755
666a4537 2756 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e0fce78f
VS
2757 unsigned int lane_mask = 0x0;
2758
2759 if (IS_CHERRYVIEW(dev))
2760 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2761
9b6de0a1
VS
2762 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2763 lane_mask);
e0fce78f 2764 }
61234fa5 2765
f01eca2e 2766 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2767 intel_dp_start_link_train(intel_dp);
3ab9c637 2768 intel_dp_stop_link_train(intel_dp);
c1dec79a 2769
6e3c9717 2770 if (crtc->config->has_audio) {
c1dec79a 2771 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
d6fbdd15 2772 pipe_name(pipe));
c1dec79a
JN
2773 intel_audio_codec_enable(encoder);
2774 }
ab1f90f9 2775}
89b667f8 2776
ecff4f3b
JN
2777static void g4x_enable_dp(struct intel_encoder *encoder)
2778{
828f5c6e
JN
2779 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2780
ecff4f3b 2781 intel_enable_dp(encoder);
4be73780 2782 intel_edp_backlight_on(intel_dp);
ab1f90f9 2783}
89b667f8 2784
ab1f90f9
JN
2785static void vlv_enable_dp(struct intel_encoder *encoder)
2786{
828f5c6e
JN
2787 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2788
4be73780 2789 intel_edp_backlight_on(intel_dp);
b32c6f48 2790 intel_psr_enable(intel_dp);
d240f20f
JB
2791}
2792
ecff4f3b 2793static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9 2794{
d6fbdd15 2795 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
ab1f90f9 2796 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
d6fbdd15
VS
2797 enum port port = dp_to_dig_port(intel_dp)->port;
2798 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
ab1f90f9 2799
8ac33ed3
DV
2800 intel_dp_prepare(encoder);
2801
d6fbdd15
VS
2802 if (port == PORT_A && IS_GEN5(dev_priv)) {
2803 /*
2804 * We get FIFO underruns on the other pipe when
2805 * enabling the CPU eDP PLL, and when enabling CPU
2806 * eDP port. We could potentially avoid the PLL
2807 * underrun with a vblank wait just prior to enabling
2808 * the PLL, but that doesn't appear to help the port
2809 * enable case. Just sweep it all under the rug.
2810 */
2811 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2812 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2813 }
2814
d41f1efb 2815 /* Only ilk+ has port A */
abfce949 2816 if (port == PORT_A)
ab1f90f9
JN
2817 ironlake_edp_pll_on(intel_dp);
2818}
2819
83b84597
VS
2820static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2821{
2822 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2823 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2824 enum pipe pipe = intel_dp->pps_pipe;
f0f59a00 2825 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
83b84597
VS
2826
2827 edp_panel_vdd_off_sync(intel_dp);
2828
2829 /*
2830 * VLV seems to get confused when multiple power sequencers
2831 * have the same port selected (even if only one has power/vdd
2832 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2833 * CHV, on the other hand, doesn't seem to mind having the same port
2834 * selected in multiple power sequencers, but let's clear the
2835 * port select always when logically disconnecting a power sequencer
2836 * from a port.
2837 */
2838 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2839 pipe_name(pipe), port_name(intel_dig_port->port));
2840 I915_WRITE(pp_on_reg, 0);
2841 POSTING_READ(pp_on_reg);
2842
2843 intel_dp->pps_pipe = INVALID_PIPE;
2844}
2845
a4a5d2f8
VS
2846static void vlv_steal_power_sequencer(struct drm_device *dev,
2847 enum pipe pipe)
2848{
2849 struct drm_i915_private *dev_priv = dev->dev_private;
2850 struct intel_encoder *encoder;
2851
2852 lockdep_assert_held(&dev_priv->pps_mutex);
2853
ac3c12e4
VS
2854 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2855 return;
2856
19c8054c 2857 for_each_intel_encoder(dev, encoder) {
a4a5d2f8 2858 struct intel_dp *intel_dp;
773538e8 2859 enum port port;
a4a5d2f8
VS
2860
2861 if (encoder->type != INTEL_OUTPUT_EDP)
2862 continue;
2863
2864 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2865 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2866
2867 if (intel_dp->pps_pipe != pipe)
2868 continue;
2869
2870 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2871 pipe_name(pipe), port_name(port));
a4a5d2f8 2872
e02f9a06 2873 WARN(encoder->base.crtc,
034e43c6
VS
2874 "stealing pipe %c power sequencer from active eDP port %c\n",
2875 pipe_name(pipe), port_name(port));
a4a5d2f8 2876
a4a5d2f8 2877 /* make sure vdd is off before we steal it */
83b84597 2878 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2879 }
2880}
2881
2882static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2883{
2884 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2885 struct intel_encoder *encoder = &intel_dig_port->base;
2886 struct drm_device *dev = encoder->base.dev;
2887 struct drm_i915_private *dev_priv = dev->dev_private;
2888 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2889
2890 lockdep_assert_held(&dev_priv->pps_mutex);
2891
093e3f13
VS
2892 if (!is_edp(intel_dp))
2893 return;
2894
a4a5d2f8
VS
2895 if (intel_dp->pps_pipe == crtc->pipe)
2896 return;
2897
2898 /*
2899 * If another power sequencer was being used on this
2900 * port previously, make sure to turn off vdd there while
2901 * we still have control of it.
2902 */
2903 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2904 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2905
2906 /*
2907 * We may be stealing the power
2908 * sequencer from another port.
2909 */
2910 vlv_steal_power_sequencer(dev, crtc->pipe);
2911
2912 /* now it's all ours */
2913 intel_dp->pps_pipe = crtc->pipe;
2914
2915 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2916 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2917
2918 /* init power sequencer on this pipe and port */
36b5f425
VS
2919 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2920 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2921}
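/*
 * Illustrative sketch, not part of intel_dp.c: the invariant that
 * vlv_steal_power_sequencer() and vlv_init_panel_power_sequencer() above
 * maintain, modelled on a toy structure.  Before a port takes over a pipe's
 * power sequencer, any other port still claiming that pipe is detached
 * (VDD forced off, port select cleared), so no two ports ever share one
 * sequencer.  All names below are made up for the model.
 */
#define MODEL_INVALID_PIPE (-1)

struct model_edp_port {
	int pps_pipe;	/* pipe whose power sequencer this port owns */
};

static void model_assign_pps(struct model_edp_port *ports, int nports,
			     struct model_edp_port *new_owner, int pipe)
{
	int i;

	/* "steal": detach every other port that still points at this pipe */
	for (i = 0; i < nports; i++) {
		if (&ports[i] != new_owner && ports[i].pps_pipe == pipe)
			ports[i].pps_pipe = MODEL_INVALID_PIPE;
	}

	new_owner->pps_pipe = pipe;
}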
2922
ab1f90f9 2923static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2924{
2bd2ad64 2925 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2926 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2927 struct drm_device *dev = encoder->base.dev;
89b667f8 2928 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2929 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2930 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2931 int pipe = intel_crtc->pipe;
2932 u32 val;
a4fc5ed6 2933
a580516d 2934 mutex_lock(&dev_priv->sb_lock);
89b667f8 2935
ab3c759a 2936 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2937 val = 0;
2938 if (pipe)
2939 val |= (1<<21);
2940 else
2941 val &= ~(1<<21);
2942 val |= 0x001000c4;
ab3c759a
CML
2943 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2944 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2945 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2946
a580516d 2947 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2948
2949 intel_enable_dp(encoder);
89b667f8
JB
2950}
2951
ecff4f3b 2952static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2953{
2954 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2955 struct drm_device *dev = encoder->base.dev;
2956 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2957 struct intel_crtc *intel_crtc =
2958 to_intel_crtc(encoder->base.crtc);
e4607fcf 2959 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2960 int pipe = intel_crtc->pipe;
89b667f8 2961
8ac33ed3
DV
2962 intel_dp_prepare(encoder);
2963
89b667f8 2964 /* Program Tx lane resets to default */
a580516d 2965 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2966 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2967 DPIO_PCS_TX_LANE2_RESET |
2968 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2969 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2970 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2971 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2972 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2973 DPIO_PCS_CLK_SOFT_RESET);
2974
2975 /* Fix up inter-pair skew failure */
ab3c759a
CML
2976 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2977 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2978 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2979 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2980}
2981
e4a1d846
CML
2982static void chv_pre_enable_dp(struct intel_encoder *encoder)
2983{
2984 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2985 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2986 struct drm_device *dev = encoder->base.dev;
2987 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2988 struct intel_crtc *intel_crtc =
2989 to_intel_crtc(encoder->base.crtc);
2990 enum dpio_channel ch = vlv_dport_to_channel(dport);
2991 int pipe = intel_crtc->pipe;
2e523e98 2992 int data, i, stagger;
949c1d43 2993 u32 val;
e4a1d846 2994
a580516d 2995 mutex_lock(&dev_priv->sb_lock);
949c1d43 2996
570e2a74
VS
2997 /* allow hardware to manage TX FIFO reset source */
2998 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2999 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3000 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3001
e0fce78f
VS
3002 if (intel_crtc->config->lane_count > 2) {
3003 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3004 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3005 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3006 }
570e2a74 3007
949c1d43 3008 /* Program Tx lane latency optimal setting*/
e0fce78f 3009 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 3010 /* Set the upar bit */
e0fce78f
VS
3011 if (intel_crtc->config->lane_count == 1)
3012 data = 0x0;
3013 else
3014 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
3015 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3016 data << DPIO_UPAR_SHIFT);
3017 }
3018
3019 /* Data lane stagger programming */
2e523e98
VS
3020 if (intel_crtc->config->port_clock > 270000)
3021 stagger = 0x18;
3022 else if (intel_crtc->config->port_clock > 135000)
3023 stagger = 0xd;
3024 else if (intel_crtc->config->port_clock > 67500)
3025 stagger = 0x7;
3026 else if (intel_crtc->config->port_clock > 33750)
3027 stagger = 0x4;
3028 else
3029 stagger = 0x2;
3030
3031 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3032 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3033 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3034
e0fce78f
VS
3035 if (intel_crtc->config->lane_count > 2) {
3036 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3037 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3038 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3039 }
2e523e98
VS
3040
3041 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3042 DPIO_LANESTAGGER_STRAP(stagger) |
3043 DPIO_LANESTAGGER_STRAP_OVRD |
3044 DPIO_TX1_STAGGER_MASK(0x1f) |
3045 DPIO_TX1_STAGGER_MULT(6) |
3046 DPIO_TX2_STAGGER_MULT(0));
3047
e0fce78f
VS
3048 if (intel_crtc->config->lane_count > 2) {
3049 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3050 DPIO_LANESTAGGER_STRAP(stagger) |
3051 DPIO_LANESTAGGER_STRAP_OVRD |
3052 DPIO_TX1_STAGGER_MASK(0x1f) |
3053 DPIO_TX1_STAGGER_MULT(7) |
3054 DPIO_TX2_STAGGER_MULT(5));
3055 }
e4a1d846 3056
a8f327fb
VS
3057 /* Deassert data lane reset */
3058 chv_data_lane_soft_reset(encoder, false);
3059
a580516d 3060 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 3061
e4a1d846 3062 intel_enable_dp(encoder);
b0b33846
VS
3063
3064 /* Second common lane will stay alive on its own now */
3065 if (dport->release_cl2_override) {
3066 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3067 dport->release_cl2_override = false;
3068 }
e4a1d846
CML
3069}
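/*
 * Illustrative sketch, not part of intel_dp.c: the data lane stagger ladder
 * programmed above, as a lookup helper.  chv_dp_lane_stagger() is a made-up
 * name; the thresholds are the link symbol clock in kHz and the returned
 * value is what gets written into the DPIO lane stagger fields.
 */
static int chv_dp_lane_stagger(int port_clock_khz)
{
	if (port_clock_khz > 270000)
		return 0x18;
	if (port_clock_khz > 135000)
		return 0xd;
	if (port_clock_khz > 67500)
		return 0x7;
	if (port_clock_khz > 33750)
		return 0x4;
	return 0x2;
}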
3070
9197c88b
VS
3071static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3072{
3073 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3074 struct drm_device *dev = encoder->base.dev;
3075 struct drm_i915_private *dev_priv = dev->dev_private;
3076 struct intel_crtc *intel_crtc =
3077 to_intel_crtc(encoder->base.crtc);
3078 enum dpio_channel ch = vlv_dport_to_channel(dport);
3079 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
3080 unsigned int lane_mask =
3081 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
3082 u32 val;
3083
625695f8
VS
3084 intel_dp_prepare(encoder);
3085
b0b33846
VS
3086 /*
3087 * Must trick the second common lane into life.
3088 * Otherwise we can't even access the PLL.
3089 */
3090 if (ch == DPIO_CH0 && pipe == PIPE_B)
3091 dport->release_cl2_override =
3092 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3093
e0fce78f
VS
3094 chv_phy_powergate_lanes(encoder, true, lane_mask);
3095
a580516d 3096 mutex_lock(&dev_priv->sb_lock);
9197c88b 3097
a8f327fb
VS
3098 /* Assert data lane reset */
3099 chv_data_lane_soft_reset(encoder, true);
3100
b9e5ac3c
VS
3101 /* program left/right clock distribution */
3102 if (pipe != PIPE_B) {
3103 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3104 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3105 if (ch == DPIO_CH0)
3106 val |= CHV_BUFLEFTENA1_FORCE;
3107 if (ch == DPIO_CH1)
3108 val |= CHV_BUFRIGHTENA1_FORCE;
3109 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3110 } else {
3111 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3112 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3113 if (ch == DPIO_CH0)
3114 val |= CHV_BUFLEFTENA2_FORCE;
3115 if (ch == DPIO_CH1)
3116 val |= CHV_BUFRIGHTENA2_FORCE;
3117 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3118 }
3119
9197c88b
VS
3120 /* program clock channel usage */
3121 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3122 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3123 if (pipe != PIPE_B)
3124 val &= ~CHV_PCS_USEDCLKCHANNEL;
3125 else
3126 val |= CHV_PCS_USEDCLKCHANNEL;
3127 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3128
e0fce78f
VS
3129 if (intel_crtc->config->lane_count > 2) {
3130 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3131 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3132 if (pipe != PIPE_B)
3133 val &= ~CHV_PCS_USEDCLKCHANNEL;
3134 else
3135 val |= CHV_PCS_USEDCLKCHANNEL;
3136 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3137 }
9197c88b
VS
3138
3139 /*
3140 * This is a bit weird since generally the CL
3141 * matches the pipe, but here we need to
3142 * pick the CL based on the port.
3143 */
3144 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3145 if (pipe != PIPE_B)
3146 val &= ~CHV_CMN_USEDCLKCHANNEL;
3147 else
3148 val |= CHV_CMN_USEDCLKCHANNEL;
3149 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3150
a580516d 3151 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3152}
3153
d6db995f
VS
3154static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3155{
3156 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3157 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3158 u32 val;
3159
3160 mutex_lock(&dev_priv->sb_lock);
3161
3162 /* disable left/right clock distribution */
3163 if (pipe != PIPE_B) {
3164 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3165 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3166 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3167 } else {
3168 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3169 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3170 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3171 }
3172
3173 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3174
b0b33846
VS
3175 /*
3176 * Leave the power down bit cleared for at least one
3177 * lane so that chv_phy_powergate_ch() will power
3178 * on something when the channel is otherwise unused.
3179 * When the port is off and the override is removed
3180 * the lanes power down anyway, so otherwise it doesn't
3181 * really matter what the state of power down bits is
3182 * after this.
3183 */
e0fce78f 3184 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3185}
3186
a4fc5ed6 3187/*
df0c237d
JB
3188 * Native read with retry for link status and receiver capability reads for
3189 * cases where the sink may still be asleep.
9d1a1031
JN
3190 *
3191 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3192 * supposed to retry 3 times per the spec.
a4fc5ed6 3193 */
9d1a1031
JN
3194static ssize_t
3195intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3196 void *buffer, size_t size)
a4fc5ed6 3197{
9d1a1031
JN
3198 ssize_t ret;
3199 int i;
61da5fab 3200
f6a19066
VS
3201 /*
3202 * Sometimes we just get the same incorrect byte repeated
3203 * over the entire buffer. Doing just one throw-away read
3204 * initially seems to "solve" it.
3205 */
3206 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3207
61da5fab 3208 for (i = 0; i < 3; i++) {
9d1a1031
JN
3209 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3210 if (ret == size)
3211 return ret;
61da5fab
JB
3212 msleep(1);
3213 }
a4fc5ed6 3214
9d1a1031 3215 return ret;
a4fc5ed6
KP
3216}
3217
3218/*
3219 * Fetch AUX CH registers 0x202 - 0x207 which contain
3220 * link status information
3221 */
94223d04 3222bool
93f62dad 3223intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3224{
9d1a1031
JN
3225 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3226 DP_LANE0_1_STATUS,
3227 link_status,
3228 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3229}
3230
1100244e 3231/* These are source-specific values. */
94223d04 3232uint8_t
1a2eb460 3233intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3234{
30add22d 3235 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3236 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3237 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3238
9314726b
VK
3239 if (IS_BROXTON(dev))
3240 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3241 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3242 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3243 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3244 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
666a4537 3245 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
bd60018a 3246 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3247 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3248 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3249 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3250 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3251 else
bd60018a 3252 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3253}
3254
94223d04 3255uint8_t
1a2eb460
KP
3256intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3257{
30add22d 3258 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3259 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3260
5a9d1f1a
DL
3261 if (INTEL_INFO(dev)->gen >= 9) {
3262 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3263 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3264 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3266 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3267 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3268 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3270 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3271 default:
3272 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3273 }
3274 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3275 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3277 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3279 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3281 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3283 default:
bd60018a 3284 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3285 }
666a4537 3286 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e2fa6fba 3287 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3288 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3289 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3291 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3293 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3295 default:
bd60018a 3296 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3297 }
bc7d38a4 3298 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3299 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3301 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3303 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3304 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3305 default:
bd60018a 3306 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3307 }
3308 } else {
3309 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3311 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3313 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3315 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3317 default:
bd60018a 3318 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3319 }
a4fc5ed6
KP
3320 }
3321}
3322
5829975c 3323static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3324{
3325 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3326 struct drm_i915_private *dev_priv = dev->dev_private;
3327 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3328 struct intel_crtc *intel_crtc =
3329 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3330 unsigned long demph_reg_value, preemph_reg_value,
3331 uniqtranscale_reg_value;
3332 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3333 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3334 int pipe = intel_crtc->pipe;
e2fa6fba
P
3335
3336 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3337 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3338 preemph_reg_value = 0x0004000;
3339 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3341 demph_reg_value = 0x2B405555;
3342 uniqtranscale_reg_value = 0x552AB83A;
3343 break;
bd60018a 3344 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3345 demph_reg_value = 0x2B404040;
3346 uniqtranscale_reg_value = 0x5548B83A;
3347 break;
bd60018a 3348 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3349 demph_reg_value = 0x2B245555;
3350 uniqtranscale_reg_value = 0x5560B83A;
3351 break;
bd60018a 3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3353 demph_reg_value = 0x2B405555;
3354 uniqtranscale_reg_value = 0x5598DA3A;
3355 break;
3356 default:
3357 return 0;
3358 }
3359 break;
bd60018a 3360 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3361 preemph_reg_value = 0x0002000;
3362 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3364 demph_reg_value = 0x2B404040;
3365 uniqtranscale_reg_value = 0x5552B83A;
3366 break;
bd60018a 3367 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3368 demph_reg_value = 0x2B404848;
3369 uniqtranscale_reg_value = 0x5580B83A;
3370 break;
bd60018a 3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3372 demph_reg_value = 0x2B404040;
3373 uniqtranscale_reg_value = 0x55ADDA3A;
3374 break;
3375 default:
3376 return 0;
3377 }
3378 break;
bd60018a 3379 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3380 preemph_reg_value = 0x0000000;
3381 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3383 demph_reg_value = 0x2B305555;
3384 uniqtranscale_reg_value = 0x5570B83A;
3385 break;
bd60018a 3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3387 demph_reg_value = 0x2B2B4040;
3388 uniqtranscale_reg_value = 0x55ADDA3A;
3389 break;
3390 default:
3391 return 0;
3392 }
3393 break;
bd60018a 3394 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3395 preemph_reg_value = 0x0006000;
3396 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3397 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3398 demph_reg_value = 0x1B405555;
3399 uniqtranscale_reg_value = 0x55ADDA3A;
3400 break;
3401 default:
3402 return 0;
3403 }
3404 break;
3405 default:
3406 return 0;
3407 }
3408
a580516d 3409 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3410 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3411 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3412 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3413 uniqtranscale_reg_value);
ab3c759a
CML
3414 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3415 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3416 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3417 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3418 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3419
3420 return 0;
3421}
3422
67fa24b4
VS
3423static bool chv_need_uniq_trans_scale(uint8_t train_set)
3424{
3425 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3426 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3427}
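/*
 * i.e. the unique transition scale is only wanted for the combination of
 * maximum voltage swing (level 3) with no pre-emphasis (level 0);
 * chv_signal_levels() below uses this to decide whether to set
 * DPIO_TX_UNIQ_TRANS_SCALE_EN in CHV_TX_DW3.
 */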
3428
5829975c 3429static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3430{
3431 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3432 struct drm_i915_private *dev_priv = dev->dev_private;
3433 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3434 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3435 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3436 uint8_t train_set = intel_dp->train_set[0];
3437 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3438 enum pipe pipe = intel_crtc->pipe;
3439 int i;
e4a1d846
CML
3440
3441 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3442 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3443 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3444 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3445 deemph_reg_value = 128;
3446 margin_reg_value = 52;
3447 break;
bd60018a 3448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3449 deemph_reg_value = 128;
3450 margin_reg_value = 77;
3451 break;
bd60018a 3452 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3453 deemph_reg_value = 128;
3454 margin_reg_value = 102;
3455 break;
bd60018a 3456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3457 deemph_reg_value = 128;
3458 margin_reg_value = 154;
3459 /* FIXME extra to set for 1200 */
3460 break;
3461 default:
3462 return 0;
3463 }
3464 break;
bd60018a 3465 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3466 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3467 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3468 deemph_reg_value = 85;
3469 margin_reg_value = 78;
3470 break;
bd60018a 3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3472 deemph_reg_value = 85;
3473 margin_reg_value = 116;
3474 break;
bd60018a 3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3476 deemph_reg_value = 85;
3477 margin_reg_value = 154;
3478 break;
3479 default:
3480 return 0;
3481 }
3482 break;
bd60018a 3483 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3484 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3485 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3486 deemph_reg_value = 64;
3487 margin_reg_value = 104;
3488 break;
bd60018a 3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3490 deemph_reg_value = 64;
3491 margin_reg_value = 154;
3492 break;
3493 default:
3494 return 0;
3495 }
3496 break;
bd60018a 3497 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3498 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3499 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3500 deemph_reg_value = 43;
3501 margin_reg_value = 154;
3502 break;
3503 default:
3504 return 0;
3505 }
3506 break;
3507 default:
3508 return 0;
3509 }
3510
a580516d 3511 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3512
3513 /* Clear calc init */
1966e59e
VS
3514 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3515 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3516 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3517 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3518 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3519
e0fce78f
VS
3520 if (intel_crtc->config->lane_count > 2) {
3521 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3522 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3523 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3524 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3525 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3526 }
e4a1d846 3527
a02ef3c7
VS
3528 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3529 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3530 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3531 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3532
e0fce78f
VS
3533 if (intel_crtc->config->lane_count > 2) {
3534 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3535 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3536 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3537 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3538 }
a02ef3c7 3539
e4a1d846 3540 /* Program swing deemph */
e0fce78f 3541 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3542 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3543 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3544 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3545 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3546 }
e4a1d846
CML
3547
3548 /* Program swing margin */
e0fce78f 3549 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3550 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3551
1fb44505
VS
3552 val &= ~DPIO_SWING_MARGIN000_MASK;
3553 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3554
3555 /*
3556 * Supposedly this value shouldn't matter when unique transition
3557 * scale is disabled, but in fact it does matter. Let's just
3558 * always program the same value and hope it's OK.
3559 */
3560 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3561 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3562
f72df8db
VS
3563 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3564 }
e4a1d846 3565
67fa24b4
VS
3566 /*
3567 * The documentation says to set bit 27 for ch0 and bit 26
3568 * for ch1, which might be a typo in the doc.
3569 * For now, for this unique transition scale selection, set bit
3570 * 27 for both ch0 and ch1.
3571 */
e0fce78f 3572 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3573 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3574 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3575 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3576 else
3577 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3578 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3579 }
3580
3581 /* Start swing calculation */
1966e59e
VS
3582 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3583 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3584 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3585
e0fce78f
VS
3586 if (intel_crtc->config->lane_count > 2) {
3587 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3588 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3589 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3590 }
e4a1d846 3591
a580516d 3592 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3593
3594 return 0;
3595}
3596
a4fc5ed6 3597static uint32_t
5829975c 3598gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3599{
3cf2efb1 3600 uint32_t signal_levels = 0;
a4fc5ed6 3601
3cf2efb1 3602 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3603 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3604 default:
3605 signal_levels |= DP_VOLTAGE_0_4;
3606 break;
bd60018a 3607 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3608 signal_levels |= DP_VOLTAGE_0_6;
3609 break;
bd60018a 3610 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3611 signal_levels |= DP_VOLTAGE_0_8;
3612 break;
bd60018a 3613 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3614 signal_levels |= DP_VOLTAGE_1_2;
3615 break;
3616 }
3cf2efb1 3617 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3618 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3619 default:
3620 signal_levels |= DP_PRE_EMPHASIS_0;
3621 break;
bd60018a 3622 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3623 signal_levels |= DP_PRE_EMPHASIS_3_5;
3624 break;
bd60018a 3625 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3626 signal_levels |= DP_PRE_EMPHASIS_6;
3627 break;
bd60018a 3628 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3629 signal_levels |= DP_PRE_EMPHASIS_9_5;
3630 break;
3631 }
3632 return signal_levels;
3633}
3634
e3421a18
ZW
3635/* Gen6's DP voltage swing and pre-emphasis control */
3636static uint32_t
5829975c 3637gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3638{
3c5a62b5
YL
3639 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3640 DP_TRAIN_PRE_EMPHASIS_MASK);
3641 switch (signal_levels) {
bd60018a
SJ
3642 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3643 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3644 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3646 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3648 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3649 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3651 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3652 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3654 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3655 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3656 default:
3c5a62b5
YL
3657 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3658 "0x%x\n", signal_levels);
3659 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3660 }
3661}
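/*
 * Worked example: a train_set of DP_TRAIN_VOLTAGE_SWING_LEVEL_1 |
 * DP_TRAIN_PRE_EMPH_LEVEL_1 matches the combined 600/800mV 3.5dB case
 * above and selects EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B.
 */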
3662
1a2eb460
KP
3663/* Gen7's DP voltage swing and pre-emphasis control */
3664static uint32_t
5829975c 3665gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3666{
3667 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3668 DP_TRAIN_PRE_EMPHASIS_MASK);
3669 switch (signal_levels) {
bd60018a 3670 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3671 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3672 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3673 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3675 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3676
bd60018a 3677 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3678 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3680 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3681
bd60018a 3682 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3683 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3684 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3685 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3686
3687 default:
3688 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3689 "0x%x\n", signal_levels);
3690 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3691 }
3692}
3693
94223d04 3694void
f4eb692e 3695intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3696{
3697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3698 enum port port = intel_dig_port->port;
f0a3424e 3699 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3700 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3701 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3702 uint8_t train_set = intel_dp->train_set[0];
3703
f8896f5d
DW
3704 if (HAS_DDI(dev)) {
3705 signal_levels = ddi_signal_levels(intel_dp);
3706
3707 if (IS_BROXTON(dev))
3708 signal_levels = 0;
3709 else
3710 mask = DDI_BUF_EMP_MASK;
e4a1d846 3711 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3712 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3713 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3714 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3715 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3716 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3717 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3718 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3719 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3720 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3721 } else {
5829975c 3722 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3723 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3724 }
3725
96fb9f9b
VK
3726 if (mask)
3727 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3728
3729 DRM_DEBUG_KMS("Using vswing level %d\n",
3730 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3731 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3732 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3733 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3734
f4eb692e 3735 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3736
3737 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3738 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3739}
3740
94223d04 3741void
e9c176d5
ACO
3742intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3743 uint8_t dp_train_pat)
a4fc5ed6 3744{
174edf1f 3745 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3746 struct drm_i915_private *dev_priv =
3747 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3748
f4eb692e 3749 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3750
f4eb692e 3751 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3752 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3753}
3754
94223d04 3755void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3756{
3757 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3758 struct drm_device *dev = intel_dig_port->base.base.dev;
3759 struct drm_i915_private *dev_priv = dev->dev_private;
3760 enum port port = intel_dig_port->port;
3761 uint32_t val;
3762
3763 if (!HAS_DDI(dev))
3764 return;
3765
3766 val = I915_READ(DP_TP_CTL(port));
3767 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3768 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3769 I915_WRITE(DP_TP_CTL(port), val);
3770
3771 /*
3772 * On PORT_A we can have only eDP in SST mode. There the only reason
3773 * we need to set idle transmission mode is to work around a HW issue
3774 * where we enable the pipe while not in idle link-training mode.
3775 * In this case there is a requirement to wait for a minimum number of
3776 * idle patterns to be sent.
3777 */
3778 if (port == PORT_A)
3779 return;
3780
3781 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3782 1))
3783 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3784}
3785
a4fc5ed6 3786static void
ea5b213a 3787intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3788{
da63a9f2 3789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3790 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3791 enum port port = intel_dig_port->port;
da63a9f2 3792 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3793 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3794 uint32_t DP = intel_dp->DP;
a4fc5ed6 3795
bc76e320 3796 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3797 return;
3798
0c33d8d7 3799 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3800 return;
3801
28c97730 3802 DRM_DEBUG_KMS("\n");
32f9d658 3803
39e5fa88
VS
3804 if ((IS_GEN7(dev) && port == PORT_A) ||
3805 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3806 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3807 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3808 } else {
aad3d14d
VS
3809 if (IS_CHERRYVIEW(dev))
3810 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3811 else
3812 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3813 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3814 }
1612c8bd 3815 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3816 POSTING_READ(intel_dp->output_reg);
5eb08b69 3817
1612c8bd
VS
3818 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3819 I915_WRITE(intel_dp->output_reg, DP);
3820 POSTING_READ(intel_dp->output_reg);
3821
3822 /*
3823 * HW workaround for IBX, we need to move the port
3824 * to transcoder A after disabling it to allow the
3825 * matching HDMI port to be enabled on transcoder A.
3826 */
3827 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3828 /*
3829 * We get CPU/PCH FIFO underruns on the other pipe when
3830 * doing the workaround. Sweep them under the rug.
3831 */
3832 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3833 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3834
1612c8bd
VS
3835 /* always enable with pattern 1 (as per spec) */
3836 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3837 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3838 I915_WRITE(intel_dp->output_reg, DP);
3839 POSTING_READ(intel_dp->output_reg);
3840
3841 DP &= ~DP_PORT_EN;
5bddd17f 3842 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3843 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3844
3845 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3846 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3847 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3848 }
3849
f01eca2e 3850 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
3851
3852 intel_dp->DP = DP;
a4fc5ed6
KP
3853}
3854
26d61aad
KP
3855static bool
3856intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3857{
a031d709
RV
3858 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3859 struct drm_device *dev = dig_port->base.base.dev;
3860 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3861 uint8_t rev;
a031d709 3862
9d1a1031
JN
3863 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3864 sizeof(intel_dp->dpcd)) < 0)
edb39244 3865 return false; /* aux transfer failed */
92fd8fd1 3866
a8e98153 3867 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3868
edb39244
AJ
3869 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3870 return false; /* DPCD not present */
3871
2293bb5c
SK
3872 /* Check if the panel supports PSR */
3873 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3874 if (is_edp(intel_dp)) {
9d1a1031
JN
3875 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3876 intel_dp->psr_dpcd,
3877 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3878 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3879 dev_priv->psr.sink_support = true;
50003939 3880 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3881 }
474d1ec4
SJ
3882
3883 if (INTEL_INFO(dev)->gen >= 9 &&
3884 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3885 uint8_t frame_sync_cap;
3886
3887 dev_priv->psr.sink_support = true;
3888 intel_dp_dpcd_read_wake(&intel_dp->aux,
3889 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3890 &frame_sync_cap, 1);
3891 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3892 /* PSR2 needs frame sync as well */
3893 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3894 DRM_DEBUG_KMS("PSR2 %s on sink",
3895 dev_priv->psr.psr2_support ? "supported" : "not supported");
3896 }
50003939
JN
3897 }
3898
bc5133d5 3899 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
e588fa18 3900 yesno(intel_dp_source_supports_hbr2(intel_dp)),
742f491d 3901 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
06ea66b6 3902
fc0f8e25
SJ
3903 /* Intermediate frequency support */
3904 if (is_edp(intel_dp) &&
3905 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3906 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3907 (rev >= 0x03)) { /* eDP 1.4 or higher */
94ca719e 3908 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3909 int i;
3910
fc0f8e25
SJ
3911 intel_dp_dpcd_read_wake(&intel_dp->aux,
3912 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3913 sink_rates,
3914 sizeof(sink_rates));
ea2d8a42 3915
94ca719e
VS
3916 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3917 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3918
3919 if (val == 0)
3920 break;
3921
af77b974
SJ
3922 /* Value read is in units of 200 kHz, while the drm clock is saved in deca-kHz (10 kHz) units */
3923 intel_dp->sink_rates[i] = (val * 200) / 10;
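 /*
  * e.g. a raw entry of 13500 (2.7 GHz expressed in 200 kHz units)
  * becomes 13500 * 200 / 10 = 270000, the 270000 link rate used
  * elsewhere in this file.
  */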
ea2d8a42 3924 }
94ca719e 3925 intel_dp->num_sink_rates = i;
fc0f8e25 3926 }
0336400e
VS
3927
3928 intel_dp_print_rates(intel_dp);
3929
edb39244
AJ
3930 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3931 DP_DWN_STRM_PORT_PRESENT))
3932 return true; /* native DP sink */
3933
3934 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3935 return true; /* no per-port downstream info */
3936
9d1a1031
JN
3937 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3938 intel_dp->downstream_ports,
3939 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3940 return false; /* downstream port status fetch failed */
3941
3942 return true;
92fd8fd1
KP
3943}
3944
0d198328
AJ
3945static void
3946intel_dp_probe_oui(struct intel_dp *intel_dp)
3947{
3948 u8 buf[3];
3949
3950 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3951 return;
3952
9d1a1031 3953 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3954 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3955 buf[0], buf[1], buf[2]);
3956
9d1a1031 3957 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3958 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3959 buf[0], buf[1], buf[2]);
3960}
3961
0e32b39c
DA
3962static bool
3963intel_dp_probe_mst(struct intel_dp *intel_dp)
3964{
3965 u8 buf[1];
3966
3967 if (!intel_dp->can_mst)
3968 return false;
3969
3970 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3971 return false;
3972
0e32b39c
DA
3973 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3974 if (buf[0] & DP_MST_CAP) {
3975 DRM_DEBUG_KMS("Sink is MST capable\n");
3976 intel_dp->is_mst = true;
3977 } else {
3978 DRM_DEBUG_KMS("Sink is not MST capable\n");
3979 intel_dp->is_mst = false;
3980 }
3981 }
0e32b39c
DA
3982
3983 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3984 return intel_dp->is_mst;
3985}
3986
e5a1cab5 3987static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 3988{
082dcc7c 3989 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 3990 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c 3991 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 3992 u8 buf;
e5a1cab5 3993 int ret = 0;
c6297843
RV
3994 int count = 0;
3995 int attempts = 10;
d2e216d0 3996
082dcc7c
RV
3997 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3998 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3999 ret = -EIO;
4000 goto out;
4373f0f2
PZ
4001 }
4002
082dcc7c 4003 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4004 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4005 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4006 ret = -EIO;
4007 goto out;
4008 }
d2e216d0 4009
c6297843
RV
4010 do {
4011 intel_wait_for_vblank(dev, intel_crtc->pipe);
4012
4013 if (drm_dp_dpcd_readb(&intel_dp->aux,
4014 DP_TEST_SINK_MISC, &buf) < 0) {
4015 ret = -EIO;
4016 goto out;
4017 }
4018 count = buf & DP_TEST_COUNT_MASK;
4019 } while (--attempts && count);
4020
4021 if (attempts == 0) {
dc5a9037 4022 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
c6297843
RV
4023 ret = -ETIMEDOUT;
4024 }
4025
e5a1cab5 4026 out:
082dcc7c 4027 hsw_enable_ips(intel_crtc);
e5a1cab5 4028 return ret;
082dcc7c
RV
4029}
4030
4031static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4032{
4033 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 4034 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c
RV
4035 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4036 u8 buf;
e5a1cab5
RV
4037 int ret;
4038
082dcc7c
RV
4039 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4040 return -EIO;
4041
4042 if (!(buf & DP_TEST_CRC_SUPPORTED))
4043 return -ENOTTY;
4044
4045 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4046 return -EIO;
4047
6d8175da
RV
4048 if (buf & DP_TEST_SINK_START) {
4049 ret = intel_dp_sink_crc_stop(intel_dp);
4050 if (ret)
4051 return ret;
4052 }
4053
082dcc7c 4054 hsw_disable_ips(intel_crtc);
1dda5f93 4055
9d1a1031 4056 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4057 buf | DP_TEST_SINK_START) < 0) {
4058 hsw_enable_ips(intel_crtc);
4059 return -EIO;
4373f0f2
PZ
4060 }
4061
d72f9d91 4062 intel_wait_for_vblank(dev, intel_crtc->pipe);
082dcc7c
RV
4063 return 0;
4064}
4065
4066int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4067{
4068 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4069 struct drm_device *dev = dig_port->base.base.dev;
4070 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4071 u8 buf;
621d4c76 4072 int count, ret;
082dcc7c 4073 int attempts = 6;
082dcc7c
RV
4074
4075 ret = intel_dp_sink_crc_start(intel_dp);
4076 if (ret)
4077 return ret;
4078
ad9dc91b 4079 do {
621d4c76
RV
4080 intel_wait_for_vblank(dev, intel_crtc->pipe);
4081
1dda5f93 4082 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4083 DP_TEST_SINK_MISC, &buf) < 0) {
4084 ret = -EIO;
afe0d67e 4085 goto stop;
4373f0f2 4086 }
621d4c76 4087 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4088
7e38eeff 4089 } while (--attempts && count == 0);
ad9dc91b
RV
4090
4091 if (attempts == 0) {
7e38eeff
RV
4092 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4093 ret = -ETIMEDOUT;
4094 goto stop;
4095 }
4096
4097 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4098 ret = -EIO;
4099 goto stop;
ad9dc91b 4100 }
d2e216d0 4101
afe0d67e 4102stop:
082dcc7c 4103 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4104 return ret;
d2e216d0
RV
4105}
4106
a60f0e38
JB
4107static bool
4108intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4109{
9d1a1031
JN
4110 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4111 DP_DEVICE_SERVICE_IRQ_VECTOR,
4112 sink_irq_vector, 1) == 1;
a60f0e38
JB
4113}
4114
0e32b39c
DA
4115static bool
4116intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4117{
4118 int ret;
4119
4120 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4121 DP_SINK_COUNT_ESI,
4122 sink_irq_vector, 14);
4123 if (ret != 14)
4124 return false;
4125
4126 return true;
4127}
4128
c5d5ab7a
TP
4129static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4130{
4131 uint8_t test_result = DP_TEST_ACK;
4132 return test_result;
4133}
4134
4135static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4136{
4137 uint8_t test_result = DP_TEST_NAK;
4138 return test_result;
4139}
4140
4141static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4142{
c5d5ab7a 4143 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4144 struct intel_connector *intel_connector = intel_dp->attached_connector;
4145 struct drm_connector *connector = &intel_connector->base;
4146
4147 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4148 connector->edid_corrupt ||
559be30c
TP
4149 intel_dp->aux.i2c_defer_count > 6) {
4150 /* Check EDID read for NACKs, DEFERs and corruption
4151 * (DP CTS 1.2 Core r1.1)
4152 * 4.2.2.4 : Failed EDID read, I2C_NAK
4153 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4154 * 4.2.2.6 : EDID corruption detected
4155 * Use failsafe mode for all cases
4156 */
4157 if (intel_dp->aux.i2c_nack_count > 0 ||
4158 intel_dp->aux.i2c_defer_count > 0)
4159 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4160 intel_dp->aux.i2c_nack_count,
4161 intel_dp->aux.i2c_defer_count);
4162 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4163 } else {
f79b468e
TS
4164 struct edid *block = intel_connector->detect_edid;
4165
4166 /* We have to write the checksum
4167 * of the last block read
4168 */
4169 block += intel_connector->detect_edid->extensions;
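 /*
  * struct edid describes a single 128 byte EDID block, so advancing
  * the pointer by ->extensions lands on the last extension block that
  * was read; its checksum byte is what gets written back to the sink
  * below.
  */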
4170
559be30c
TP
4171 if (!drm_dp_dpcd_write(&intel_dp->aux,
4172 DP_TEST_EDID_CHECKSUM,
f79b468e 4173 &block->checksum,
5a1cc655 4174 1))
559be30c
TP
4175 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4176
4177 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4178 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4179 }
4180
4181 /* Set test active flag here so userspace doesn't interrupt things */
4182 intel_dp->compliance_test_active = 1;
4183
c5d5ab7a
TP
4184 return test_result;
4185}
4186
4187static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4188{
c5d5ab7a
TP
4189 uint8_t test_result = DP_TEST_NAK;
4190 return test_result;
4191}
4192
4193static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4194{
4195 uint8_t response = DP_TEST_NAK;
4196 uint8_t rxdata = 0;
4197 int status = 0;
4198
c5d5ab7a
TP
4199 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4200 if (status <= 0) {
4201 DRM_DEBUG_KMS("Could not read test request from sink\n");
4202 goto update_status;
4203 }
4204
4205 switch (rxdata) {
4206 case DP_TEST_LINK_TRAINING:
4207 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4208 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4209 response = intel_dp_autotest_link_training(intel_dp);
4210 break;
4211 case DP_TEST_LINK_VIDEO_PATTERN:
4212 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4213 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4214 response = intel_dp_autotest_video_pattern(intel_dp);
4215 break;
4216 case DP_TEST_LINK_EDID_READ:
4217 DRM_DEBUG_KMS("EDID test requested\n");
4218 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4219 response = intel_dp_autotest_edid(intel_dp);
4220 break;
4221 case DP_TEST_LINK_PHY_TEST_PATTERN:
4222 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4223 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4224 response = intel_dp_autotest_phy_pattern(intel_dp);
4225 break;
4226 default:
4227 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4228 break;
4229 }
4230
4231update_status:
4232 status = drm_dp_dpcd_write(&intel_dp->aux,
4233 DP_TEST_RESPONSE,
4234 &response, 1);
4235 if (status <= 0)
4236 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4237}
4238
0e32b39c
DA
4239static int
4240intel_dp_check_mst_status(struct intel_dp *intel_dp)
4241{
4242 bool bret;
4243
4244 if (intel_dp->is_mst) {
4245 u8 esi[16] = { 0 };
4246 int ret = 0;
4247 int retry;
4248 bool handled;
4249 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4250go_again:
4251 if (bret == true) {
4252
4253 /* check link status - esi[10] = 0x200c */
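 /*
  * The ESI block is read starting at DP_SINK_COUNT_ESI (DPCD 0x2002),
  * so esi[10] corresponds to DPCD 0x200c, the first lane status byte
  * that drm_dp_channel_eq_ok() inspects.
  */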
90a6b7b0 4254 if (intel_dp->active_mst_links &&
901c2daf 4255 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4256 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4257 intel_dp_start_link_train(intel_dp);
0e32b39c
DA
4258 intel_dp_stop_link_train(intel_dp);
4259 }
4260
6f34cc39 4261 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4262 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4263
4264 if (handled) {
4265 for (retry = 0; retry < 3; retry++) {
4266 int wret;
4267 wret = drm_dp_dpcd_write(&intel_dp->aux,
4268 DP_SINK_COUNT_ESI+1,
4269 &esi[1], 3);
4270 if (wret == 3) {
4271 break;
4272 }
4273 }
4274
4275 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4276 if (bret == true) {
6f34cc39 4277 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4278 goto go_again;
4279 }
4280 } else
4281 ret = 0;
4282
4283 return ret;
4284 } else {
4285 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4286 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4287 intel_dp->is_mst = false;
4288 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4289 /* send a hotplug event */
4290 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4291 }
4292 }
4293 return -EINVAL;
4294}
4295
a4fc5ed6
KP
4296/*
4297 * According to DP spec
4298 * 5.1.2:
4299 * 1. Read DPCD
4300 * 2. Configure link according to Receiver Capabilities
4301 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4302 * 4. Check link status on receipt of hot-plug interrupt
4303 */
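/*
 * Loosely, step 1 maps to intel_dp_get_dpcd(), steps 2-3 to
 * intel_dp_start_link_train()/intel_dp_stop_link_train(), and step 4 to
 * intel_dp_check_link_status() below; this is a rough guide rather than
 * an exact one-to-one mapping.
 */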
a5146200 4304static void
ea5b213a 4305intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4306{
5b215bcf 4307 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4308 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4309 u8 sink_irq_vector;
93f62dad 4310 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4311
5b215bcf
DA
4312 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4313
4df6960e
SS
4314 /*
4315 * Clear the compliance test variables so that fresh values can be
4316 * captured for the next automated test request.
4317 */
4318 intel_dp->compliance_test_active = 0;
4319 intel_dp->compliance_test_type = 0;
4320 intel_dp->compliance_test_data = 0;
4321
e02f9a06 4322 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4323 return;
4324
1a125d8a
ID
4325 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4326 return;
4327
92fd8fd1 4328 /* Try to read receiver status if the link appears to be up */
93f62dad 4329 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4330 return;
4331 }
4332
92fd8fd1 4333 /* Now read the DPCD to see if it's actually running */
26d61aad 4334 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4335 return;
4336 }
4337
a60f0e38
JB
4338 /* Try to read the source of the interrupt */
4339 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4340 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4341 /* Clear interrupt source */
9d1a1031
JN
4342 drm_dp_dpcd_writeb(&intel_dp->aux,
4343 DP_DEVICE_SERVICE_IRQ_VECTOR,
4344 sink_irq_vector);
a60f0e38
JB
4345
4346 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4347 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4348 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4349 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4350 }
4351
14631e9d
SS
4352 /* if link training is requested we should perform it always */
4353 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4354 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
92fd8fd1 4355 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4356 intel_encoder->base.name);
33a34e4e 4357 intel_dp_start_link_train(intel_dp);
3ab9c637 4358 intel_dp_stop_link_train(intel_dp);
33a34e4e 4359 }
a4fc5ed6 4360}
a4fc5ed6 4361
caf9ab24 4362/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4363static enum drm_connector_status
26d61aad 4364intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4365{
caf9ab24 4366 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4367 uint8_t type;
4368
4369 if (!intel_dp_get_dpcd(intel_dp))
4370 return connector_status_disconnected;
4371
4372 /* if there's no downstream port, we're done */
4373 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4374 return connector_status_connected;
caf9ab24
AJ
4375
4376 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4377 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4378 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4379 uint8_t reg;
9d1a1031
JN
4380
4381 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4382 &reg, 1) < 0)
caf9ab24 4383 return connector_status_unknown;
9d1a1031 4384
23235177
AJ
4385 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4386 : connector_status_disconnected;
caf9ab24
AJ
4387 }
4388
4389 /* If no HPD, poke DDC gently */
0b99836f 4390 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4391 return connector_status_connected;
caf9ab24
AJ
4392
4393 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4394 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4395 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4396 if (type == DP_DS_PORT_TYPE_VGA ||
4397 type == DP_DS_PORT_TYPE_NON_EDID)
4398 return connector_status_unknown;
4399 } else {
4400 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4401 DP_DWN_STRM_PORT_TYPE_MASK;
4402 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4403 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4404 return connector_status_unknown;
4405 }
caf9ab24
AJ
4406
4407 /* Anything else is out of spec, warn and ignore */
4408 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4409 return connector_status_disconnected;
71ba9000
AJ
4410}
4411
d410b56d
CW
4412static enum drm_connector_status
4413edp_detect(struct intel_dp *intel_dp)
4414{
4415 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4416 enum drm_connector_status status;
4417
4418 status = intel_panel_detect(dev);
4419 if (status == connector_status_unknown)
4420 status = connector_status_connected;
4421
4422 return status;
4423}
4424
b93433cc
JN
4425static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4426 struct intel_digital_port *port)
5eb08b69 4427{
b93433cc 4428 u32 bit;
01cb9ea6 4429
0df53b77
JN
4430 switch (port->port) {
4431 case PORT_A:
4432 return true;
4433 case PORT_B:
4434 bit = SDE_PORTB_HOTPLUG;
4435 break;
4436 case PORT_C:
4437 bit = SDE_PORTC_HOTPLUG;
4438 break;
4439 case PORT_D:
4440 bit = SDE_PORTD_HOTPLUG;
4441 break;
4442 default:
4443 MISSING_CASE(port->port);
4444 return false;
4445 }
4446
4447 return I915_READ(SDEISR) & bit;
4448}
4449
4450static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4451 struct intel_digital_port *port)
4452{
4453 u32 bit;
4454
4455 switch (port->port) {
4456 case PORT_A:
4457 return true;
4458 case PORT_B:
4459 bit = SDE_PORTB_HOTPLUG_CPT;
4460 break;
4461 case PORT_C:
4462 bit = SDE_PORTC_HOTPLUG_CPT;
4463 break;
4464 case PORT_D:
4465 bit = SDE_PORTD_HOTPLUG_CPT;
4466 break;
a78695d3
JN
4467 case PORT_E:
4468 bit = SDE_PORTE_HOTPLUG_SPT;
4469 break;
0df53b77
JN
4470 default:
4471 MISSING_CASE(port->port);
4472 return false;
b93433cc 4473 }
1b469639 4474
b93433cc 4475 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4476}
4477
7e66bcf2 4478static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4479 struct intel_digital_port *port)
a4fc5ed6 4480{
9642c81c 4481 u32 bit;
5eb08b69 4482
9642c81c
JN
4483 switch (port->port) {
4484 case PORT_B:
4485 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4486 break;
4487 case PORT_C:
4488 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4489 break;
4490 case PORT_D:
4491 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4492 break;
4493 default:
4494 MISSING_CASE(port->port);
4495 return false;
4496 }
4497
4498 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4499}
4500
0780cd36
VS
4501static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4502 struct intel_digital_port *port)
9642c81c
JN
4503{
4504 u32 bit;
4505
4506 switch (port->port) {
4507 case PORT_B:
0780cd36 4508 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4509 break;
4510 case PORT_C:
0780cd36 4511 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4512 break;
4513 case PORT_D:
0780cd36 4514 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4515 break;
4516 default:
4517 MISSING_CASE(port->port);
4518 return false;
a4fc5ed6
KP
4519 }
4520
1d245987 4521 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4522}
4523
e464bfde 4524static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4525 struct intel_digital_port *intel_dig_port)
e464bfde 4526{
e2ec35a5
SJ
4527 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4528 enum port port;
e464bfde
JN
4529 u32 bit;
4530
e2ec35a5
SJ
4531 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4532 switch (port) {
e464bfde
JN
4533 case PORT_A:
4534 bit = BXT_DE_PORT_HP_DDIA;
4535 break;
4536 case PORT_B:
4537 bit = BXT_DE_PORT_HP_DDIB;
4538 break;
4539 case PORT_C:
4540 bit = BXT_DE_PORT_HP_DDIC;
4541 break;
4542 default:
e2ec35a5 4543 MISSING_CASE(port);
e464bfde
JN
4544 return false;
4545 }
4546
4547 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4548}
4549
7e66bcf2
JN
4550/*
4551 * intel_digital_port_connected - is the specified port connected?
4552 * @dev_priv: i915 private structure
4553 * @port: the port to test
4554 *
4555 * Return %true if @port is connected, %false otherwise.
4556 */
237ed86c 4557bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4558 struct intel_digital_port *port)
4559{
0df53b77 4560 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4561 return ibx_digital_port_connected(dev_priv, port);
22824fac 4562 else if (HAS_PCH_SPLIT(dev_priv))
0df53b77 4563 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4564 else if (IS_BROXTON(dev_priv))
4565 return bxt_digital_port_connected(dev_priv, port);
0780cd36
VS
4566 else if (IS_GM45(dev_priv))
4567 return gm45_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4568 else
4569 return g4x_digital_port_connected(dev_priv, port);
4570}
4571
8c241fef 4572static struct edid *
beb60608 4573intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4574{
beb60608 4575 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4576
9cd300e0
JN
4577 /* use cached edid if we have one */
4578 if (intel_connector->edid) {
9cd300e0
JN
4579 /* invalid edid */
4580 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4581 return NULL;
4582
55e9edeb 4583 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4584 } else
4585 return drm_get_edid(&intel_connector->base,
4586 &intel_dp->aux.ddc);
4587}
8c241fef 4588
beb60608
CW
4589static void
4590intel_dp_set_edid(struct intel_dp *intel_dp)
4591{
4592 struct intel_connector *intel_connector = intel_dp->attached_connector;
4593 struct edid *edid;
8c241fef 4594
beb60608
CW
4595 edid = intel_dp_get_edid(intel_dp);
4596 intel_connector->detect_edid = edid;
4597
4598 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4599 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4600 else
4601 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4602}
4603
beb60608
CW
4604static void
4605intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4606{
beb60608 4607 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4608
beb60608
CW
4609 kfree(intel_connector->detect_edid);
4610 intel_connector->detect_edid = NULL;
9cd300e0 4611
beb60608
CW
4612 intel_dp->has_audio = false;
4613}
d6f24d0f 4614
a9756bb5
ZW
4615static enum drm_connector_status
4616intel_dp_detect(struct drm_connector *connector, bool force)
4617{
4618 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4619 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4620 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4621 struct drm_device *dev = connector->dev;
a9756bb5 4622 enum drm_connector_status status;
671dedd2 4623 enum intel_display_power_domain power_domain;
0e32b39c 4624 bool ret;
09b1eb13 4625 u8 sink_irq_vector;
a9756bb5 4626
164c8598 4627 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4628 connector->base.id, connector->name);
beb60608 4629 intel_dp_unset_edid(intel_dp);
164c8598 4630
0e32b39c
DA
4631 if (intel_dp->is_mst) {
4632 /* MST devices are disconnected from a monitor POV */
4633 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4634 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4635 return connector_status_disconnected;
0e32b39c
DA
4636 }
4637
25f78f58
VS
4638 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4639 intel_display_power_get(to_i915(dev), power_domain);
a9756bb5 4640
d410b56d
CW
4641 /* Can't disconnect eDP, but you can close the lid... */
4642 if (is_edp(intel_dp))
4643 status = edp_detect(intel_dp);
c555a81d
ACO
4644 else if (intel_digital_port_connected(to_i915(dev),
4645 dp_to_dig_port(intel_dp)))
4646 status = intel_dp_detect_dpcd(intel_dp);
a9756bb5 4647 else
c555a81d
ACO
4648 status = connector_status_disconnected;
4649
4df6960e
SS
4650 if (status != connector_status_connected) {
4651 intel_dp->compliance_test_active = 0;
4652 intel_dp->compliance_test_type = 0;
4653 intel_dp->compliance_test_data = 0;
4654
c8c8fb33 4655 goto out;
4df6960e 4656 }
a9756bb5 4657
0d198328
AJ
4658 intel_dp_probe_oui(intel_dp);
4659
0e32b39c
DA
4660 ret = intel_dp_probe_mst(intel_dp);
4661 if (ret) {
4662 /* if we are in MST mode then this connector
4663 won't appear connected or have anything with EDID on it */
4664 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4665 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4666 status = connector_status_disconnected;
4667 goto out;
4668 }
4669
4df6960e
SS
4670 /*
4671 * Clearing NACK and defer counts to get their exact values
4672 * while reading EDID which are required by Compliance tests
4673 * 4.2.2.4 and 4.2.2.5
4674 */
4675 intel_dp->aux.i2c_nack_count = 0;
4676 intel_dp->aux.i2c_defer_count = 0;
4677
beb60608 4678 intel_dp_set_edid(intel_dp);
a9756bb5 4679
d63885da
PZ
4680 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4681 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4682 status = connector_status_connected;
4683
09b1eb13
TP
4684 /* Try to read the source of the interrupt */
4685 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4686 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4687 /* Clear interrupt source */
4688 drm_dp_dpcd_writeb(&intel_dp->aux,
4689 DP_DEVICE_SERVICE_IRQ_VECTOR,
4690 sink_irq_vector);
4691
4692 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4693 intel_dp_handle_test_request(intel_dp);
4694 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4695 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4696 }
4697
c8c8fb33 4698out:
25f78f58 4699 intel_display_power_put(to_i915(dev), power_domain);
c8c8fb33 4700 return status;
a4fc5ed6
KP
4701}
4702
beb60608
CW
4703static void
4704intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4705{
df0e9248 4706 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4707 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
25f78f58 4708 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
671dedd2 4709 enum intel_display_power_domain power_domain;
a4fc5ed6 4710
beb60608
CW
4711 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4712 connector->base.id, connector->name);
4713 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4714
beb60608
CW
4715 if (connector->status != connector_status_connected)
4716 return;
671dedd2 4717
25f78f58
VS
4718 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4719 intel_display_power_get(dev_priv, power_domain);
beb60608
CW
4720
4721 intel_dp_set_edid(intel_dp);
4722
25f78f58 4723 intel_display_power_put(dev_priv, power_domain);
beb60608
CW
4724
4725 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4726 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4727}
4728
4729static int intel_dp_get_modes(struct drm_connector *connector)
4730{
4731 struct intel_connector *intel_connector = to_intel_connector(connector);
4732 struct edid *edid;
4733
4734 edid = intel_connector->detect_edid;
4735 if (edid) {
4736 int ret = intel_connector_update_modes(connector, edid);
4737 if (ret)
4738 return ret;
4739 }
32f9d658 4740
f8779fda 4741 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4742 if (is_edp(intel_attached_dp(connector)) &&
4743 intel_connector->panel.fixed_mode) {
f8779fda 4744 struct drm_display_mode *mode;
beb60608
CW
4745
4746 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4747 intel_connector->panel.fixed_mode);
f8779fda 4748 if (mode) {
32f9d658
ZW
4749 drm_mode_probed_add(connector, mode);
4750 return 1;
4751 }
4752 }
beb60608 4753
32f9d658 4754 return 0;
a4fc5ed6
KP
4755}
4756
1aad7ac0
CW
4757static bool
4758intel_dp_detect_audio(struct drm_connector *connector)
4759{
1aad7ac0 4760 bool has_audio = false;
beb60608 4761 struct edid *edid;
1aad7ac0 4762
beb60608
CW
4763 edid = to_intel_connector(connector)->detect_edid;
4764 if (edid)
1aad7ac0 4765 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4766
1aad7ac0
CW
4767 return has_audio;
4768}
4769
f684960e
CW
4770static int
4771intel_dp_set_property(struct drm_connector *connector,
4772 struct drm_property *property,
4773 uint64_t val)
4774{
e953fd7b 4775 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4776 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4777 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4778 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4779 int ret;
4780
662595df 4781 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4782 if (ret)
4783 return ret;
4784
3f43c48d 4785 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4786 int i = val;
4787 bool has_audio;
4788
4789 if (i == intel_dp->force_audio)
f684960e
CW
4790 return 0;
4791
1aad7ac0 4792 intel_dp->force_audio = i;
f684960e 4793
c3e5f67b 4794 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4795 has_audio = intel_dp_detect_audio(connector);
4796 else
c3e5f67b 4797 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4798
4799 if (has_audio == intel_dp->has_audio)
f684960e
CW
4800 return 0;
4801
1aad7ac0 4802 intel_dp->has_audio = has_audio;
f684960e
CW
4803 goto done;
4804 }
4805
e953fd7b 4806 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4807 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4808 bool old_range = intel_dp->limited_color_range;
ae4edb80 4809
55bc60db
VS
4810 switch (val) {
4811 case INTEL_BROADCAST_RGB_AUTO:
4812 intel_dp->color_range_auto = true;
4813 break;
4814 case INTEL_BROADCAST_RGB_FULL:
4815 intel_dp->color_range_auto = false;
0f2a2a75 4816 intel_dp->limited_color_range = false;
55bc60db
VS
4817 break;
4818 case INTEL_BROADCAST_RGB_LIMITED:
4819 intel_dp->color_range_auto = false;
0f2a2a75 4820 intel_dp->limited_color_range = true;
55bc60db
VS
4821 break;
4822 default:
4823 return -EINVAL;
4824 }
ae4edb80
DV
4825
4826 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4827 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4828 return 0;
4829
e953fd7b
CW
4830 goto done;
4831 }
4832
53b41837
YN
4833 if (is_edp(intel_dp) &&
4834 property == connector->dev->mode_config.scaling_mode_property) {
4835 if (val == DRM_MODE_SCALE_NONE) {
4836 DRM_DEBUG_KMS("no scaling not supported\n");
4837 return -EINVAL;
4838 }
4839
4840 if (intel_connector->panel.fitting_mode == val) {
4841 /* the eDP scaling property is not changed */
4842 return 0;
4843 }
4844 intel_connector->panel.fitting_mode = val;
4845
4846 goto done;
4847 }
4848
f684960e
CW
4849 return -EINVAL;
4850
4851done:
c0c36b94
CW
4852 if (intel_encoder->base.crtc)
4853 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4854
4855 return 0;
4856}
4857
a4fc5ed6 4858static void
73845adf 4859intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4860{
1d508706 4861 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4862
10e972d3 4863 kfree(intel_connector->detect_edid);
beb60608 4864
9cd300e0
JN
4865 if (!IS_ERR_OR_NULL(intel_connector->edid))
4866 kfree(intel_connector->edid);
4867
acd8db10
PZ
4868 /* Can't call is_edp() since the encoder may have been destroyed
4869 * already. */
4870 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4871 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4872
a4fc5ed6 4873 drm_connector_cleanup(connector);
55f78c43 4874 kfree(connector);
a4fc5ed6
KP
4875}
4876
00c09d70 4877void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4878{
da63a9f2
PZ
4879 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4880 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4881
a121f4e5 4882 intel_dp_aux_fini(intel_dp);
0e32b39c 4883 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4884 if (is_edp(intel_dp)) {
4885 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4886 /*
 4887 * vdd might still be enabled due to the delayed vdd off.
4888 * Make sure vdd is actually turned off here.
4889 */
773538e8 4890 pps_lock(intel_dp);
4be73780 4891 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4892 pps_unlock(intel_dp);
4893
01527b31
CT
4894 if (intel_dp->edp_notifier.notifier_call) {
4895 unregister_reboot_notifier(&intel_dp->edp_notifier);
4896 intel_dp->edp_notifier.notifier_call = NULL;
4897 }
bd943159 4898 }
c8bd0e49 4899 drm_encoder_cleanup(encoder);
da63a9f2 4900 kfree(intel_dig_port);
24d05927
DV
4901}
4902
07f9cd0b
ID
4903static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4904{
4905 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4906
4907 if (!is_edp(intel_dp))
4908 return;
4909
951468f3
VS
4910 /*
 4911 * vdd might still be enabled due to the delayed vdd off.
4912 * Make sure vdd is actually turned off here.
4913 */
afa4e53a 4914 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4915 pps_lock(intel_dp);
07f9cd0b 4916 edp_panel_vdd_off_sync(intel_dp);
773538e8 4917 pps_unlock(intel_dp);
07f9cd0b
ID
4918}
4919
49e6bc51
VS
4920static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4921{
4922 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4923 struct drm_device *dev = intel_dig_port->base.base.dev;
4924 struct drm_i915_private *dev_priv = dev->dev_private;
4925 enum intel_display_power_domain power_domain;
4926
4927 lockdep_assert_held(&dev_priv->pps_mutex);
4928
4929 if (!edp_have_panel_vdd(intel_dp))
4930 return;
4931
4932 /*
4933 * The VDD bit needs a power domain reference, so if the bit is
4934 * already enabled when we boot or resume, grab this reference and
4935 * schedule a vdd off, so we don't hold on to the reference
4936 * indefinitely.
4937 */
4938 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
25f78f58 4939 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
49e6bc51
VS
4940 intel_display_power_get(dev_priv, power_domain);
4941
4942 edp_panel_vdd_schedule_off(intel_dp);
4943}
4944
6d93c0c4
ID
4945static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4946{
49e6bc51
VS
4947 struct intel_dp *intel_dp;
4948
4949 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4950 return;
4951
4952 intel_dp = enc_to_intel_dp(encoder);
4953
4954 pps_lock(intel_dp);
4955
4956 /*
4957 * Read out the current power sequencer assignment,
4958 * in case the BIOS did something with it.
4959 */
666a4537 4960 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
49e6bc51
VS
4961 vlv_initial_power_sequencer_setup(intel_dp);
4962
4963 intel_edp_panel_vdd_sanitize(intel_dp);
4964
4965 pps_unlock(intel_dp);
6d93c0c4
ID
4966}
4967
a4fc5ed6 4968static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4969 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4970 .detect = intel_dp_detect,
beb60608 4971 .force = intel_dp_force,
a4fc5ed6 4972 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4973 .set_property = intel_dp_set_property,
2545e4a6 4974 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4975 .destroy = intel_dp_connector_destroy,
c6f95f27 4976 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4977 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4978};
4979
4980static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4981 .get_modes = intel_dp_get_modes,
4982 .mode_valid = intel_dp_mode_valid,
df0e9248 4983 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4984};
4985
a4fc5ed6 4986static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4987 .reset = intel_dp_encoder_reset,
24d05927 4988 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4989};
4990
b2c5c181 4991enum irqreturn
13cf5504
DA
4992intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4993{
4994 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4995 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4996 struct drm_device *dev = intel_dig_port->base.base.dev;
4997 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4998 enum intel_display_power_domain power_domain;
b2c5c181 4999 enum irqreturn ret = IRQ_NONE;
1c767b33 5000
2540058f
TI
5001 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5002 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
0e32b39c 5003 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 5004
7a7f84cc
VS
5005 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5006 /*
5007 * vdd off can generate a long pulse on eDP which
5008 * would require vdd on to handle it, and thus we
5009 * would end up in an endless cycle of
5010 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5011 */
5012 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5013 port_name(intel_dig_port->port));
a8b3d52f 5014 return IRQ_HANDLED;
7a7f84cc
VS
5015 }
5016
26fbb774
VS
5017 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5018 port_name(intel_dig_port->port),
0e32b39c 5019 long_hpd ? "long" : "short");
13cf5504 5020
25f78f58 5021 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1c767b33
ID
5022 intel_display_power_get(dev_priv, power_domain);
5023
0e32b39c 5024 if (long_hpd) {
5fa836a9
MK
5025 /* indicate that we need to restart link training */
5026 intel_dp->train_set_valid = false;
2a592bec 5027
7e66bcf2
JN
5028 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5029 goto mst_fail;
0e32b39c
DA
5030
5031 if (!intel_dp_get_dpcd(intel_dp)) {
5032 goto mst_fail;
5033 }
5034
5035 intel_dp_probe_oui(intel_dp);
5036
d14e7b6d
VS
5037 if (!intel_dp_probe_mst(intel_dp)) {
5038 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5039 intel_dp_check_link_status(intel_dp);
5040 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 5041 goto mst_fail;
d14e7b6d 5042 }
0e32b39c
DA
5043 } else {
5044 if (intel_dp->is_mst) {
1c767b33 5045 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5046 goto mst_fail;
5047 }
5048
5049 if (!intel_dp->is_mst) {
5b215bcf 5050 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5051 intel_dp_check_link_status(intel_dp);
5b215bcf 5052 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5053 }
5054 }
b2c5c181
DV
5055
5056 ret = IRQ_HANDLED;
5057
1c767b33 5058 goto put_power;
0e32b39c
DA
5059mst_fail:
 5060 /* if we were in MST mode, and the device is not there, get out of MST mode */
5061 if (intel_dp->is_mst) {
5062 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5063 intel_dp->is_mst = false;
5064 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5065 }
1c767b33
ID
5066put_power:
5067 intel_display_power_put(dev_priv, power_domain);
5068
5069 return ret;
13cf5504
DA
5070}
5071
477ec328 5072/* check the VBT to see whether the eDP is on another port */
5d8a7752 5073bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5074{
5075 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5076 union child_device_config *p_child;
36e83a18 5077 int i;
5d8a7752 5078 static const short port_mapping[] = {
477ec328
RV
5079 [PORT_B] = DVO_PORT_DPB,
5080 [PORT_C] = DVO_PORT_DPC,
5081 [PORT_D] = DVO_PORT_DPD,
5082 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5083 };
36e83a18 5084
53ce81a7
VS
5085 /*
 5086 * eDP is not supported on g4x, so bail out early just
 5087 * for a bit of extra safety in case the VBT is bonkers.
5088 */
5089 if (INTEL_INFO(dev)->gen < 5)
5090 return false;
5091
3b32a35b
VS
5092 if (port == PORT_A)
5093 return true;
5094
41aa3448 5095 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5096 return false;
5097
41aa3448
RV
5098 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5099 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5100
5d8a7752 5101 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5102 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5103 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5104 return true;
5105 }
5106 return false;
5107}
5108
0e32b39c 5109void
f684960e
CW
5110intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5111{
53b41837
YN
5112 struct intel_connector *intel_connector = to_intel_connector(connector);
5113
3f43c48d 5114 intel_attach_force_audio_property(connector);
e953fd7b 5115 intel_attach_broadcast_rgb_property(connector);
55bc60db 5116 intel_dp->color_range_auto = true;
53b41837
YN
5117
5118 if (is_edp(intel_dp)) {
5119 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5120 drm_object_attach_property(
5121 &connector->base,
53b41837 5122 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5123 DRM_MODE_SCALE_ASPECT);
5124 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5125 }
f684960e
CW
5126}
5127
dada1a9f
ID
5128static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5129{
d28d4731 5130 intel_dp->panel_power_off_time = ktime_get_boottime();
dada1a9f
ID
5131 intel_dp->last_power_on = jiffies;
5132 intel_dp->last_backlight_off = jiffies;
5133}
5134
67a54566
DV
5135static void
5136intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5137 struct intel_dp *intel_dp)
67a54566
DV
5138{
5139 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5140 struct edp_power_seq cur, vbt, spec,
5141 *final = &intel_dp->pps_delays;
b0a08bec 5142 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
f0f59a00 5143 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5144
e39b999a
VS
5145 lockdep_assert_held(&dev_priv->pps_mutex);
5146
81ddbc69
VS
5147 /* already initialized? */
5148 if (final->t11_t12 != 0)
5149 return;
5150
b0a08bec
VK
5151 if (IS_BROXTON(dev)) {
5152 /*
5153 * TODO: BXT has 2 sets of PPS registers.
 5154 * The correct register for Broxton needs to be identified
 5155 * using the VBT; hardcoding for now.
5156 */
5157 pp_ctrl_reg = BXT_PP_CONTROL(0);
5158 pp_on_reg = BXT_PP_ON_DELAYS(0);
5159 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5160 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5161 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5162 pp_on_reg = PCH_PP_ON_DELAYS;
5163 pp_off_reg = PCH_PP_OFF_DELAYS;
5164 pp_div_reg = PCH_PP_DIVISOR;
5165 } else {
bf13e81b
JN
5166 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5167
5168 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5169 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5170 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5171 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5172 }
67a54566
DV
5173
5174 /* Workaround: Need to write PP_CONTROL with the unlock key as
5175 * the very first thing. */
b0a08bec 5176 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5177
453c5420
JB
5178 pp_on = I915_READ(pp_on_reg);
5179 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5180 if (!IS_BROXTON(dev)) {
5181 I915_WRITE(pp_ctrl_reg, pp_ctl);
5182 pp_div = I915_READ(pp_div_reg);
5183 }
67a54566
DV
5184
5185 /* Pull timing values out of registers */
5186 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5187 PANEL_POWER_UP_DELAY_SHIFT;
5188
5189 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5190 PANEL_LIGHT_ON_DELAY_SHIFT;
5191
5192 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5193 PANEL_LIGHT_OFF_DELAY_SHIFT;
5194
5195 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5196 PANEL_POWER_DOWN_DELAY_SHIFT;
5197
b0a08bec
VK
5198 if (IS_BROXTON(dev)) {
5199 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5200 BXT_POWER_CYCLE_DELAY_SHIFT;
5201 if (tmp > 0)
5202 cur.t11_t12 = (tmp - 1) * 1000;
5203 else
5204 cur.t11_t12 = 0;
5205 } else {
5206 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5207 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5208 }
67a54566
DV
5209
5210 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5211 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5212
41aa3448 5213 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5214
5215 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5216 * our hw here, which are all in 100usec. */
5217 spec.t1_t3 = 210 * 10;
5218 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5219 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5220 spec.t10 = 500 * 10;
5221 /* This one is special and actually in units of 100ms, but zero
5222 * based in the hw (so we need to add 100 ms). But the sw vbt
 5223 * table multiplies it by 1000 to make it in units of 100usec,
5224 * too. */
5225 spec.t11_t12 = (510 + 100) * 10;
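	/*
	 * Worked example: (510 + 100) * 10 = 6100 units of 100 usec, i.e. the
	 * 510 ms limit plus the 100 ms added for the zero-based hw field,
	 * 610 ms in total.
	 */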
5226
5227 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5228 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5229
5230 /* Use the max of the register settings and vbt. If both are
5231 * unset, fall back to the spec limits. */
36b5f425 5232#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5233 spec.field : \
5234 max(cur.field, vbt.field))
5235 assign_final(t1_t3);
5236 assign_final(t8);
5237 assign_final(t9);
5238 assign_final(t10);
5239 assign_final(t11_t12);
5240#undef assign_final
5241
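/* Convert the 100 usec units used above into whole milliseconds, rounding up. */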
36b5f425 5242#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5243 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5244 intel_dp->backlight_on_delay = get_delay(t8);
5245 intel_dp->backlight_off_delay = get_delay(t9);
5246 intel_dp->panel_power_down_delay = get_delay(t10);
5247 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5248#undef get_delay
5249
f30d26e4
JN
5250 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5251 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5252 intel_dp->panel_power_cycle_delay);
5253
5254 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5255 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5256}
5257
5258static void
5259intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5260 struct intel_dp *intel_dp)
f30d26e4
JN
5261{
5262 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420 5263 u32 pp_on, pp_off, pp_div, port_sel = 0;
e7dc33f3 5264 int div = dev_priv->rawclk_freq / 1000;
f0f59a00 5265 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
ad933b56 5266 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5267 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5268
e39b999a 5269 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5270
b0a08bec
VK
5271 if (IS_BROXTON(dev)) {
5272 /*
5273 * TODO: BXT has 2 sets of PPS registers.
 5274 * The correct register for Broxton needs to be identified
 5275 * using the VBT; hardcoding for now.
5276 */
5277 pp_ctrl_reg = BXT_PP_CONTROL(0);
5278 pp_on_reg = BXT_PP_ON_DELAYS(0);
5279 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5280
5281 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5282 pp_on_reg = PCH_PP_ON_DELAYS;
5283 pp_off_reg = PCH_PP_OFF_DELAYS;
5284 pp_div_reg = PCH_PP_DIVISOR;
5285 } else {
bf13e81b
JN
5286 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5287
5288 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5289 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5290 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5291 }
5292
b2f19d1a
PZ
5293 /*
5294 * And finally store the new values in the power sequencer. The
5295 * backlight delays are set to 1 because we do manual waits on them. For
5296 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5297 * we'll end up waiting for the backlight off delay twice: once when we
5298 * do the manual sleep, and once when we disable the panel and wait for
5299 * the PP_STATUS bit to become zero.
5300 */
f30d26e4 5301 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5302 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5303 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5304 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5305 /* Compute the divisor for the pp clock, simply match the Bspec
5306 * formula. */
b0a08bec
VK
5307 if (IS_BROXTON(dev)) {
5308 pp_div = I915_READ(pp_ctrl_reg);
5309 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5310 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5311 << BXT_POWER_CYCLE_DELAY_SHIFT);
5312 } else {
5313 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5314 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5315 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5316 }
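	/*
	 * Illustrative numbers only (assuming a 24 MHz rawclk, which is not
	 * taken from this code): div = 24, so the reference divider field in
	 * the non-BXT branch above works out to (100 * 24) / 2 - 1 = 1199.
	 */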
67a54566
DV
5317
5318 /* Haswell doesn't have any port selection bits for the panel
5319 * power sequencer any more. */
666a4537 5320 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ad933b56 5321 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5322 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5323 if (port == PORT_A)
a24c144c 5324 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5325 else
a24c144c 5326 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5327 }
5328
453c5420
JB
5329 pp_on |= port_sel;
5330
5331 I915_WRITE(pp_on_reg, pp_on);
5332 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5333 if (IS_BROXTON(dev))
5334 I915_WRITE(pp_ctrl_reg, pp_div);
5335 else
5336 I915_WRITE(pp_div_reg, pp_div);
67a54566 5337
67a54566 5338 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5339 I915_READ(pp_on_reg),
5340 I915_READ(pp_off_reg),
b0a08bec
VK
5341 IS_BROXTON(dev) ?
5342 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5343 I915_READ(pp_div_reg));
f684960e
CW
5344}
5345
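/*
 * A minimal sketch of how intel_dp_set_drrs_state() is driven elsewhere in
 * this file (illustrative only; callers must hold dev_priv->drrs.mutex and
 * check dev_priv->drrs.dp first):
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	if (dev_priv->drrs.dp)
 *		intel_dp_set_drrs_state(dev_priv->dev,
 *					dev_priv->drrs.dp->attached_connector->
 *						panel.fixed_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */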
b33a2815
VK
5346/**
5347 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5348 * @dev: DRM device
5349 * @refresh_rate: RR to be programmed
5350 *
5351 * This function gets called when refresh rate (RR) has to be changed from
5352 * one frequency to another. Switches can be between high and low RR
5353 * supported by the panel or to any other RR based on media playback (in
5354 * this case, RR value needs to be passed from user space).
5355 *
5356 * The caller of this function needs to take a lock on dev_priv->drrs.
5357 */
96178eeb 5358static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5359{
5360 struct drm_i915_private *dev_priv = dev->dev_private;
5361 struct intel_encoder *encoder;
96178eeb
VK
5362 struct intel_digital_port *dig_port = NULL;
5363 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5364 struct intel_crtc_state *config = NULL;
439d7ac0 5365 struct intel_crtc *intel_crtc = NULL;
96178eeb 5366 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5367
5368 if (refresh_rate <= 0) {
5369 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5370 return;
5371 }
5372
96178eeb
VK
5373 if (intel_dp == NULL) {
5374 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5375 return;
5376 }
5377
1fcc9d1c 5378 /*
e4d59f6b
RV
5379 * FIXME: This needs proper synchronization with psr state for some
5380 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5381 */
439d7ac0 5382
96178eeb
VK
5383 dig_port = dp_to_dig_port(intel_dp);
5384 encoder = &dig_port->base;
723f9aab 5385 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5386
5387 if (!intel_crtc) {
5388 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5389 return;
5390 }
5391
6e3c9717 5392 config = intel_crtc->config;
439d7ac0 5393
96178eeb 5394 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5395 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5396 return;
5397 }
5398
96178eeb
VK
5399 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5400 refresh_rate)
439d7ac0
PB
5401 index = DRRS_LOW_RR;
5402
96178eeb 5403 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5404 DRM_DEBUG_KMS(
5405 "DRRS requested for previously set RR...ignoring\n");
5406 return;
5407 }
5408
5409 if (!intel_crtc->active) {
5410 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5411 return;
5412 }
5413
44395bfe 5414 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5415 switch (index) {
5416 case DRRS_HIGH_RR:
5417 intel_dp_set_m_n(intel_crtc, M1_N1);
5418 break;
5419 case DRRS_LOW_RR:
5420 intel_dp_set_m_n(intel_crtc, M2_N2);
5421 break;
5422 case DRRS_MAX_RR:
5423 default:
5424 DRM_ERROR("Unsupported refreshrate type\n");
5425 }
5426 } else if (INTEL_INFO(dev)->gen > 6) {
f0f59a00 5427 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
649636ef 5428 u32 val;
a4c30b1d 5429
649636ef 5430 val = I915_READ(reg);
439d7ac0 5431 if (index > DRRS_HIGH_RR) {
666a4537 5432 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5433 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5434 else
5435 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5436 } else {
666a4537 5437 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5438 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5439 else
5440 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5441 }
5442 I915_WRITE(reg, val);
5443 }
5444
4e9ac947
VK
5445 dev_priv->drrs.refresh_rate_type = index;
5446
5447 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5448}
5449
b33a2815
VK
5450/**
5451 * intel_edp_drrs_enable - init drrs struct if supported
5452 * @intel_dp: DP struct
5453 *
5454 * Initializes frontbuffer_bits and drrs.dp
5455 */
c395578e
VK
5456void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5457{
5458 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5459 struct drm_i915_private *dev_priv = dev->dev_private;
5460 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5461 struct drm_crtc *crtc = dig_port->base.base.crtc;
5462 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5463
5464 if (!intel_crtc->config->has_drrs) {
5465 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5466 return;
5467 }
5468
5469 mutex_lock(&dev_priv->drrs.mutex);
5470 if (WARN_ON(dev_priv->drrs.dp)) {
5471 DRM_ERROR("DRRS already enabled\n");
5472 goto unlock;
5473 }
5474
5475 dev_priv->drrs.busy_frontbuffer_bits = 0;
5476
5477 dev_priv->drrs.dp = intel_dp;
5478
5479unlock:
5480 mutex_unlock(&dev_priv->drrs.mutex);
5481}
5482
b33a2815
VK
5483/**
5484 * intel_edp_drrs_disable - Disable DRRS
5485 * @intel_dp: DP struct
5486 *
5487 */
c395578e
VK
5488void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5489{
5490 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5491 struct drm_i915_private *dev_priv = dev->dev_private;
5492 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5493 struct drm_crtc *crtc = dig_port->base.base.crtc;
5494 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5495
5496 if (!intel_crtc->config->has_drrs)
5497 return;
5498
5499 mutex_lock(&dev_priv->drrs.mutex);
5500 if (!dev_priv->drrs.dp) {
5501 mutex_unlock(&dev_priv->drrs.mutex);
5502 return;
5503 }
5504
5505 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5506 intel_dp_set_drrs_state(dev_priv->dev,
5507 intel_dp->attached_connector->panel.
5508 fixed_mode->vrefresh);
5509
5510 dev_priv->drrs.dp = NULL;
5511 mutex_unlock(&dev_priv->drrs.mutex);
5512
5513 cancel_delayed_work_sync(&dev_priv->drrs.work);
5514}
5515
4e9ac947
VK
5516static void intel_edp_drrs_downclock_work(struct work_struct *work)
5517{
5518 struct drm_i915_private *dev_priv =
5519 container_of(work, typeof(*dev_priv), drrs.work.work);
5520 struct intel_dp *intel_dp;
5521
5522 mutex_lock(&dev_priv->drrs.mutex);
5523
5524 intel_dp = dev_priv->drrs.dp;
5525
5526 if (!intel_dp)
5527 goto unlock;
5528
439d7ac0 5529 /*
4e9ac947
VK
5530 * The delayed work can race with an invalidate hence we need to
5531 * recheck.
439d7ac0
PB
5532 */
5533
4e9ac947
VK
5534 if (dev_priv->drrs.busy_frontbuffer_bits)
5535 goto unlock;
439d7ac0 5536
4e9ac947
VK
5537 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5538 intel_dp_set_drrs_state(dev_priv->dev,
5539 intel_dp->attached_connector->panel.
5540 downclock_mode->vrefresh);
439d7ac0 5541
4e9ac947 5542unlock:
4e9ac947 5543 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5544}
5545
b33a2815 5546/**
0ddfd203 5547 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5548 * @dev: DRM device
5549 * @frontbuffer_bits: frontbuffer plane tracking bits
5550 *
0ddfd203
R
 5551 * This function gets called every time rendering on the given planes starts.
 5552 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5553 *
5554 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5555 */
a93fad0f
VK
5556void intel_edp_drrs_invalidate(struct drm_device *dev,
5557 unsigned frontbuffer_bits)
5558{
5559 struct drm_i915_private *dev_priv = dev->dev_private;
5560 struct drm_crtc *crtc;
5561 enum pipe pipe;
5562
9da7d693 5563 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5564 return;
5565
88f933a8 5566 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5567
a93fad0f 5568 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5569 if (!dev_priv->drrs.dp) {
5570 mutex_unlock(&dev_priv->drrs.mutex);
5571 return;
5572 }
5573
a93fad0f
VK
5574 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5575 pipe = to_intel_crtc(crtc)->pipe;
5576
c1d038c6
DV
5577 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5578 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5579
0ddfd203 5580 /* invalidate means busy screen hence upclock */
c1d038c6 5581 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5582 intel_dp_set_drrs_state(dev_priv->dev,
5583 dev_priv->drrs.dp->attached_connector->panel.
5584 fixed_mode->vrefresh);
a93fad0f 5585
a93fad0f
VK
5586 mutex_unlock(&dev_priv->drrs.mutex);
5587}
5588
b33a2815 5589/**
0ddfd203 5590 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5591 * @dev: DRM device
5592 * @frontbuffer_bits: frontbuffer plane tracking bits
5593 *
0ddfd203
R
 5594 * This function gets called every time rendering on the given planes has
 5595 * completed or a flip on a crtc has completed. So DRRS should be upclocked
 5596 * (LOW_RR -> HIGH_RR). Also, idleness detection should be restarted
 5597 * if no other planes are dirty.
b33a2815
VK
5598 *
5599 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5600 */
a93fad0f
VK
5601void intel_edp_drrs_flush(struct drm_device *dev,
5602 unsigned frontbuffer_bits)
5603{
5604 struct drm_i915_private *dev_priv = dev->dev_private;
5605 struct drm_crtc *crtc;
5606 enum pipe pipe;
5607
9da7d693 5608 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5609 return;
5610
88f933a8 5611 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5612
a93fad0f 5613 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5614 if (!dev_priv->drrs.dp) {
5615 mutex_unlock(&dev_priv->drrs.mutex);
5616 return;
5617 }
5618
a93fad0f
VK
5619 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5620 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5621
5622 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5623 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5624
0ddfd203 5625 /* flush means busy screen hence upclock */
c1d038c6 5626 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5627 intel_dp_set_drrs_state(dev_priv->dev,
5628 dev_priv->drrs.dp->attached_connector->panel.
5629 fixed_mode->vrefresh);
5630
5631 /*
5632 * flush also means no more activity hence schedule downclock, if all
5633 * other fbs are quiescent too
5634 */
5635 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5636 schedule_delayed_work(&dev_priv->drrs.work,
5637 msecs_to_jiffies(1000));
5638 mutex_unlock(&dev_priv->drrs.mutex);
5639}
5640
b33a2815
VK
5641/**
5642 * DOC: Display Refresh Rate Switching (DRRS)
5643 *
5644 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5645 * which enables switching between low and high refresh rates,
5646 * dynamically, based on the usage scenario. This feature is applicable
5647 * for internal panels.
5648 *
5649 * Indication that the panel supports DRRS is given by the panel EDID, which
5650 * would list multiple refresh rates for one resolution.
5651 *
5652 * DRRS is of 2 types - static and seamless.
5653 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 5654 * (may appear as a blink on screen) and is used in dock/undock scenarios.
5655 * Seamless DRRS involves changing RR without any visual effect to the user
5656 * and can be used during normal system usage. This is done by programming
5657 * certain registers.
5658 *
5659 * Support for static/seamless DRRS may be indicated in the VBT based on
5660 * inputs from the panel spec.
5661 *
5662 * DRRS saves power by switching to low RR based on usage scenarios.
5663 *
5664 * eDP DRRS:-
5665 * The implementation is based on frontbuffer tracking implementation.
5666 * When there is a disturbance on the screen triggered by user activity or a
5667 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5668 * When there is no movement on screen, after a timeout of 1 second, a switch
5669 * to low RR is made.
5670 * For integration with frontbuffer tracking code,
5671 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5672 *
5673 * DRRS can be further extended to support other internal panels and also
5674 * the scenario of video playback wherein RR is set based on the rate
5675 * requested by userspace.
5676 */
5677
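/*
 * A minimal, illustrative call-site sketch for the frontbuffer tracking
 * integration described above. The frontbuffer bits shown are an assumption;
 * real callers derive them from the plane being rendered to:
 *
 *	unsigned bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
 *
 *	intel_edp_drrs_invalidate(dev, bits);	// rendering starts, force high RR
 *	... render to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, bits);	// done, idleness timer may downclock
 */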
5678/**
5679 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5680 * @intel_connector: eDP connector
5681 * @fixed_mode: preferred mode of panel
5682 *
5683 * This function is called only once at driver load to initialize basic
5684 * DRRS stuff.
5685 *
5686 * Returns:
5687 * Downclock mode if panel supports it, else return NULL.
5688 * DRRS support is determined by the presence of downclock mode (apart
5689 * from VBT setting).
5690 */
4f9db5b5 5691static struct drm_display_mode *
96178eeb
VK
5692intel_dp_drrs_init(struct intel_connector *intel_connector,
5693 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5694{
5695 struct drm_connector *connector = &intel_connector->base;
96178eeb 5696 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5697 struct drm_i915_private *dev_priv = dev->dev_private;
5698 struct drm_display_mode *downclock_mode = NULL;
5699
9da7d693
DV
5700 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5701 mutex_init(&dev_priv->drrs.mutex);
5702
4f9db5b5
PB
5703 if (INTEL_INFO(dev)->gen <= 6) {
5704 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5705 return NULL;
5706 }
5707
5708 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5709 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5710 return NULL;
5711 }
5712
5713 downclock_mode = intel_find_panel_downclock
5714 (dev, fixed_mode, connector);
5715
5716 if (!downclock_mode) {
a1d26342 5717 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5718 return NULL;
5719 }
5720
96178eeb 5721 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5722
96178eeb 5723 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5724 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5725 return downclock_mode;
5726}
5727
ed92f0b2 5728static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5729 struct intel_connector *intel_connector)
ed92f0b2
PZ
5730{
5731 struct drm_connector *connector = &intel_connector->base;
5732 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5733 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5734 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5735 struct drm_i915_private *dev_priv = dev->dev_private;
5736 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5737 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5738 bool has_dpcd;
5739 struct drm_display_mode *scan;
5740 struct edid *edid;
6517d273 5741 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5742
5743 if (!is_edp(intel_dp))
5744 return true;
5745
49e6bc51
VS
5746 pps_lock(intel_dp);
5747 intel_edp_panel_vdd_sanitize(intel_dp);
5748 pps_unlock(intel_dp);
63635217 5749
ed92f0b2 5750 /* Cache DPCD and EDID for edp. */
ed92f0b2 5751 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5752
5753 if (has_dpcd) {
5754 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5755 dev_priv->no_aux_handshake =
5756 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5757 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5758 } else {
5759 /* if this fails, presume the device is a ghost */
5760 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5761 return false;
5762 }
5763
5764 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5765 pps_lock(intel_dp);
36b5f425 5766 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5767 pps_unlock(intel_dp);
ed92f0b2 5768
060c8778 5769 mutex_lock(&dev->mode_config.mutex);
0b99836f 5770 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5771 if (edid) {
5772 if (drm_add_edid_modes(connector, edid)) {
5773 drm_mode_connector_update_edid_property(connector,
5774 edid);
5775 drm_edid_to_eld(connector, edid);
5776 } else {
5777 kfree(edid);
5778 edid = ERR_PTR(-EINVAL);
5779 }
5780 } else {
5781 edid = ERR_PTR(-ENOENT);
5782 }
5783 intel_connector->edid = edid;
5784
5785 /* prefer fixed mode from EDID if available */
5786 list_for_each_entry(scan, &connector->probed_modes, head) {
5787 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5788 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5789 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5790 intel_connector, fixed_mode);
ed92f0b2
PZ
5791 break;
5792 }
5793 }
5794
5795 /* fallback to VBT if available for eDP */
5796 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5797 fixed_mode = drm_mode_duplicate(dev,
5798 dev_priv->vbt.lfp_lvds_vbt_mode);
5799 if (fixed_mode)
5800 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5801 }
060c8778 5802 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5803
666a4537 5804 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
01527b31
CT
5805 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5806 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5807
5808 /*
5809 * Figure out the current pipe for the initial backlight setup.
5810 * If the current pipe isn't valid, try the PPS pipe, and if that
5811 * fails just assume pipe A.
5812 */
5813 if (IS_CHERRYVIEW(dev))
5814 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5815 else
5816 pipe = PORT_TO_PIPE(intel_dp->DP);
5817
5818 if (pipe != PIPE_A && pipe != PIPE_B)
5819 pipe = intel_dp->pps_pipe;
5820
5821 if (pipe != PIPE_A && pipe != PIPE_B)
5822 pipe = PIPE_A;
5823
5824 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5825 pipe_name(pipe));
01527b31
CT
5826 }
5827
4f9db5b5 5828 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 5829 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 5830 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5831
5832 return true;
5833}
5834
16c25533 5835bool
f0fec3f2
PZ
5836intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5837 struct intel_connector *intel_connector)
a4fc5ed6 5838{
f0fec3f2
PZ
5839 struct drm_connector *connector = &intel_connector->base;
5840 struct intel_dp *intel_dp = &intel_dig_port->dp;
5841 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5842 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5843 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5844 enum port port = intel_dig_port->port;
a121f4e5 5845 int type, ret;
a4fc5ed6 5846
ccb1a831
VS
5847 if (WARN(intel_dig_port->max_lanes < 1,
5848 "Not enough lanes (%d) for DP on port %c\n",
5849 intel_dig_port->max_lanes, port_name(port)))
5850 return false;
5851
a4a5d2f8
VS
5852 intel_dp->pps_pipe = INVALID_PIPE;
5853
ec5b01dd 5854 /* intel_dp vfuncs */
b6b5e383
DL
5855 if (INTEL_INFO(dev)->gen >= 9)
5856 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
ec5b01dd
DL
5857 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5858 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5859 else if (HAS_PCH_SPLIT(dev))
5860 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5861 else
6ffb1be7 5862 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
ec5b01dd 5863
b9ca5fad
DL
5864 if (INTEL_INFO(dev)->gen >= 9)
5865 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5866 else
6ffb1be7 5867 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
153b1100 5868
ad64217b
ACO
5869 if (HAS_DDI(dev))
5870 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5871
0767935e
DV
5872 /* Preserve the current hw state. */
5873 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5874 intel_dp->attached_connector = intel_connector;
3d3dc149 5875
3b32a35b 5876 if (intel_dp_is_edp(dev, port))
b329530c 5877 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5878 else
5879 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5880
f7d24902
ID
5881 /*
5882 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5883 * for DP the encoder type can be set by the caller to
5884 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5885 */
5886 if (type == DRM_MODE_CONNECTOR_eDP)
5887 intel_encoder->type = INTEL_OUTPUT_EDP;
5888
c17ed5b5 5889 /* eDP only on port B and/or C on vlv/chv */
666a4537
WB
5890 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5891 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
c17ed5b5
VS
5892 return false;
5893
e7281eab
ID
5894 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5895 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5896 port_name(port));
5897
b329530c 5898 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5899 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5900
a4fc5ed6
KP
5901 connector->interlace_allowed = true;
5902 connector->doublescan_allowed = 0;
5903
f0fec3f2 5904 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5905 edp_panel_vdd_work);
a4fc5ed6 5906
df0e9248 5907 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5908 drm_connector_register(connector);
a4fc5ed6 5909
affa9354 5910 if (HAS_DDI(dev))
bcbc889b
PZ
5911 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5912 else
5913 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5914 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5915
0b99836f 5916 /* Set up the hotplug pin. */
ab9d7c30
PZ
5917 switch (port) {
5918 case PORT_A:
1d843f9d 5919 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5920 break;
5921 case PORT_B:
1d843f9d 5922 intel_encoder->hpd_pin = HPD_PORT_B;
e87a005d 5923 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
cf1d5883 5924 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5925 break;
5926 case PORT_C:
1d843f9d 5927 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5928 break;
5929 case PORT_D:
1d843f9d 5930 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5931 break;
26951caf
XZ
5932 case PORT_E:
5933 intel_encoder->hpd_pin = HPD_PORT_E;
5934 break;
ab9d7c30 5935 default:
ad1c0b19 5936 BUG();
5eb08b69
ZW
5937 }
5938
dada1a9f 5939 if (is_edp(intel_dp)) {
773538e8 5940 pps_lock(intel_dp);
1e74a324 5941 intel_dp_init_panel_power_timestamps(intel_dp);
666a4537 5942 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
a4a5d2f8 5943 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5944 else
36b5f425 5945 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5946 pps_unlock(intel_dp);
dada1a9f 5947 }
0095e6dc 5948
a121f4e5
VS
5949 ret = intel_dp_aux_init(intel_dp, intel_connector);
5950 if (ret)
5951 goto fail;
c1f05264 5952
0e32b39c 5953 /* init MST on ports that can support it */
0c9b3715
JN
5954 if (HAS_DP_MST(dev) &&
5955 (port == PORT_B || port == PORT_C || port == PORT_D))
5956 intel_dp_mst_encoder_init(intel_dig_port,
5957 intel_connector->base.base.id);
0e32b39c 5958
36b5f425 5959 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
a121f4e5
VS
5960 intel_dp_aux_fini(intel_dp);
5961 intel_dp_mst_encoder_cleanup(intel_dig_port);
5962 goto fail;
b2f246a8 5963 }
32f9d658 5964
f684960e
CW
5965 intel_dp_add_properties(intel_dp, connector);
5966
a4fc5ed6
KP
5967 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5968 * 0xd. Failure to do so will result in spurious interrupts being
5969 * generated on the port when a cable is not attached.
5970 */
5971 if (IS_G4X(dev) && !IS_GM45(dev)) {
5972 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5973 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5974 }
16c25533 5975
aa7471d2
JN
5976 i915_debugfs_connector_add(connector);
5977
16c25533 5978 return true;
a121f4e5
VS
5979
5980fail:
5981 if (is_edp(intel_dp)) {
5982 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5983 /*
 5984 * vdd might still be enabled due to the delayed vdd off.
5985 * Make sure vdd is actually turned off here.
5986 */
5987 pps_lock(intel_dp);
5988 edp_panel_vdd_off_sync(intel_dp);
5989 pps_unlock(intel_dp);
5990 }
5991 drm_connector_unregister(connector);
5992 drm_connector_cleanup(connector);
5993
5994 return false;
a4fc5ed6 5995}
f0fec3f2
PZ
5996
5997void
f0f59a00
VS
5998intel_dp_init(struct drm_device *dev,
5999 i915_reg_t output_reg, enum port port)
f0fec3f2 6000{
13cf5504 6001 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6002 struct intel_digital_port *intel_dig_port;
6003 struct intel_encoder *intel_encoder;
6004 struct drm_encoder *encoder;
6005 struct intel_connector *intel_connector;
6006
b14c5679 6007 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6008 if (!intel_dig_port)
6009 return;
6010
08d9bc92 6011 intel_connector = intel_connector_alloc();
11aee0f6
SM
6012 if (!intel_connector)
6013 goto err_connector_alloc;
f0fec3f2
PZ
6014
6015 intel_encoder = &intel_dig_port->base;
6016 encoder = &intel_encoder->base;
6017
893da0c9 6018 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
ade1ba73 6019 DRM_MODE_ENCODER_TMDS, NULL))
893da0c9 6020 goto err_encoder_init;
f0fec3f2 6021
5bfe2ac0 6022 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6023 intel_encoder->disable = intel_disable_dp;
00c09d70 6024 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6025 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6026 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6027 if (IS_CHERRYVIEW(dev)) {
9197c88b 6028 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6029 intel_encoder->pre_enable = chv_pre_enable_dp;
6030 intel_encoder->enable = vlv_enable_dp;
580d3811 6031 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6032 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6033 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6034 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6035 intel_encoder->pre_enable = vlv_pre_enable_dp;
6036 intel_encoder->enable = vlv_enable_dp;
49277c31 6037 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6038 } else {
ecff4f3b
JN
6039 intel_encoder->pre_enable = g4x_pre_enable_dp;
6040 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6041 if (INTEL_INFO(dev)->gen >= 5)
6042 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6043 }
f0fec3f2 6044
174edf1f 6045 intel_dig_port->port = port;
0bdf5a05 6046 dev_priv->dig_port_map[port] = intel_encoder;
f0fec3f2 6047 intel_dig_port->dp.output_reg = output_reg;
ccb1a831 6048 intel_dig_port->max_lanes = 4;
f0fec3f2 6049
00c09d70 6050 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6051 if (IS_CHERRYVIEW(dev)) {
6052 if (port == PORT_D)
6053 intel_encoder->crtc_mask = 1 << 2;
6054 else
6055 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6056 } else {
6057 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6058 }
bc079e8b 6059 intel_encoder->cloneable = 0;
f0fec3f2 6060
13cf5504 6061 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6062 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6063
11aee0f6
SM
6064 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6065 goto err_init_connector;
6066
6067 return;
6068
6069err_init_connector:
6070 drm_encoder_cleanup(encoder);
893da0c9 6071err_encoder_init:
11aee0f6
SM
6072 kfree(intel_connector);
6073err_connector_alloc:
6074 kfree(intel_dig_port);
6075
6076 return;
f0fec3f2 6077}
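/*
 * For reference, a hedged sketch of how this entry point is typically used by
 * the platform display setup code (the register name is an assumption and
 * varies per platform):
 *
 *	intel_dp_init(dev, DP_B, PORT_B);
 */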
0e32b39c
DA
6078
6079void intel_dp_mst_suspend(struct drm_device *dev)
6080{
6081 struct drm_i915_private *dev_priv = dev->dev_private;
6082 int i;
6083
6084 /* disable MST */
6085 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6086 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6087 if (!intel_dig_port)
6088 continue;
6089
6090 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6091 if (!intel_dig_port->dp.can_mst)
6092 continue;
6093 if (intel_dig_port->dp.is_mst)
6094 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6095 }
6096 }
6097}
6098
6099void intel_dp_mst_resume(struct drm_device *dev)
6100{
6101 struct drm_i915_private *dev_priv = dev->dev_private;
6102 int i;
6103
6104 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6105 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6106 if (!intel_dig_port)
6107 continue;
6108 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6109 int ret;
6110
6111 if (!intel_dig_port->dp.can_mst)
6112 continue;
6113
6114 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6115 if (ret != 0) {
6116 intel_dp_check_mst_status(&intel_dig_port->dp);
6117 }
6118 }
6119 }
6120}