/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates.
 * The table below provides only the fixed link rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires a fractional divider to be programmed for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
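
/*
 * A quick sanity check of the fixed-point encoding above (illustrative
 * only, not part of the driver): the 1.62 GHz entry decodes back to the
 * m2_int/m2_fraction values quoted in the table comments.
 *
 *	uint32_t m2 = 0x819999a;
 *	uint32_t m2_int = m2 >> 22;			// == 32
 *	uint32_t m2_frac = m2 & ((1 << 22) - 1);	// == 1677722
 */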

static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
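
/*
 * Worked example for the comment above (illustrative only): checking
 * whether 1680x1050R fits on a single 2.7 GHz lane at 18bpp.
 *
 *	int mode_rate = intel_dp_link_required(119000, 18);	// 214200
 *	int max_rate = intel_dp_max_data_rate(270000, 1);	// 216000
 *	bool fits = mode_rate <= max_rate;			// true, barely
 */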

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
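
/*
 * The AUX data registers are big-endian with respect to the message
 * bytes: byte 0 lands in the most significant byte of the register.
 * A minimal sketch of the round trip (illustrative only):
 *
 *	uint8_t buf[2] = { 0x12, 0x34 };
 *	uint32_t v = intel_dp_pack_aux(buf, 2);	// v == 0x12340000
 *	intel_dp_unpack_aux(v, buf, 2);		// buf is unchanged
 */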

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power, so as to guarantee
 * the T12 timing. Only applicable when the panel PM state is not being
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and the AUX clock
	 * wants to run at 2MHz. So, take the hrawclk value, divide it by
	 * 2 and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
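
/*
 * For example (illustrative only): with a 200 MHz hrawclk (the
 * CLKCFG_FSB_800 case above), intel_hrawclk() returns 200, the divider
 * programmed here is 100, and 200 MHz / 100 gives the desired 2 MHz
 * AUX clock.
 */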

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
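
/*
 * A sketch of the 4-byte AUX header built above (illustrative only):
 * a native read of one byte at DPCD address 0x000 (DPCD_REV) has
 * msg->request == DP_AUX_NATIVE_READ (0x9), msg->address == 0x000 and
 * msg->size == 1, so the header comes out as:
 *
 *	txbuf[0] = (0x9 << 4) | 0x0;	// 0x90: command + address[19:16]
 *	txbuf[1] = 0x00;		// address[15:8]
 *	txbuf[2] = 0x00;		// address[7:0]
 *	txbuf[3] = 1 - 1;		// length - 1
 */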

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For the DP link rates 2.16 GHz and 4.32 GHz the VCO is 8640,
	 * which results in a CDCLK change. We need to handle the CDCLK
	 * change by disabling the pipes and re-enabling them.
	 */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
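
/*
 * The (link_bw >> 3) + 1 idiom above works because the DP link bw codes
 * (0x06, 0x0a, 0x14) shift down to 0, 1, 2 and, plus one, directly give
 * the number of usable entries in default_rates. For example
 * (illustrative only):
 *
 *	(DP_LINK_BW_1_62 >> 3) + 1 == 1;	// {162000}
 *	(DP_LINK_BW_2_7  >> 3) + 1 == 2;	// {162000, 270000}
 *	(DP_LINK_BW_5_4  >> 3) + 1 == 3;	// all of default_rates
 */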

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
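
/*
 * intersect_rates() is a classic two-pointer merge over the two sorted,
 * ascending rate arrays. For example (illustrative only), intersecting
 * skl_rates with default_rates:
 *
 *	int common[DP_MAX_SUPPORTED_RATES];
 *	int n = intersect_rates(skl_rates, ARRAY_SIZE(skl_rates),
 *				default_rates, ARRAY_SIZE(default_rates),
 *				common);
 *	// n == 3, common == { 162000, 270000, 540000 }
 */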

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
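
/*
 * Note the rate_to_index(0, rates) idiom above (illustrative only): the
 * rates[] array is zero-initialized and filled in ascending order, so
 * searching for 0 returns the number of populated entries, and the entry
 * just before that is the highest common rate. E.g. with
 * rates == { 162000, 270000, 540000, 0, ... }, rate_to_index(0, rates)
 * returns 3 and rates[3 - 1] == 540000.
 */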

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1604
1605 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1606 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1607
1608 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1609 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1610
1611 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1612 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1613
1614 static void wait_panel_status(struct intel_dp *intel_dp,
1615 u32 mask,
1616 u32 value)
1617 {
1618 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1619 struct drm_i915_private *dev_priv = dev->dev_private;
1620 u32 pp_stat_reg, pp_ctrl_reg;
1621
1622 lockdep_assert_held(&dev_priv->pps_mutex);
1623
1624 pp_stat_reg = _pp_stat_reg(intel_dp);
1625 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1626
1627 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1628 mask, value,
1629 I915_READ(pp_stat_reg),
1630 I915_READ(pp_ctrl_reg));
1631
1632 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1633 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1634 I915_READ(pp_stat_reg),
1635 I915_READ(pp_ctrl_reg));
1636 }
1637
1638 DRM_DEBUG_KMS("Wait complete\n");
1639 }
1640
1641 static void wait_panel_on(struct intel_dp *intel_dp)
1642 {
1643 DRM_DEBUG_KMS("Wait for panel power on\n");
1644 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1645 }
1646
1647 static void wait_panel_off(struct intel_dp *intel_dp)
1648 {
1649 DRM_DEBUG_KMS("Wait for panel power off time\n");
1650 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1651 }
1652
1653 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1654 {
1655 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1656
1657 /* When we disable the VDD override bit last we have to do the manual
1658 * wait. */
1659 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1660 intel_dp->panel_power_cycle_delay);
1661
1662 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1663 }
1664
1665 static void wait_backlight_on(struct intel_dp *intel_dp)
1666 {
1667 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1668 intel_dp->backlight_on_delay);
1669 }
1670
1671 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1672 {
1673 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1674 intel_dp->backlight_off_delay);
1675 }
1676
1677 /* Read the current pp_control value, unlocking the register if it
1678 * is locked
1679 */
1680
1681 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1682 {
1683 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1684 struct drm_i915_private *dev_priv = dev->dev_private;
1685 u32 control;
1686
1687 lockdep_assert_held(&dev_priv->pps_mutex);
1688
1689 control = I915_READ(_pp_ctrl_reg(intel_dp));
1690 control &= ~PANEL_UNLOCK_MASK;
1691 control |= PANEL_UNLOCK_REGS;
1692 return control;
1693 }
1694
1695 /*
1696 * Must be paired with edp_panel_vdd_off().
1697 * Must hold pps_mutex around the whole on/off sequence.
1698 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1699 */
1700 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1701 {
1702 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1703 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1704 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1705 struct drm_i915_private *dev_priv = dev->dev_private;
1706 enum intel_display_power_domain power_domain;
1707 u32 pp;
1708 u32 pp_stat_reg, pp_ctrl_reg;
1709 bool need_to_disable = !intel_dp->want_panel_vdd;
1710
1711 lockdep_assert_held(&dev_priv->pps_mutex);
1712
1713 if (!is_edp(intel_dp))
1714 return false;
1715
1716 cancel_delayed_work(&intel_dp->panel_vdd_work);
1717 intel_dp->want_panel_vdd = true;
1718
1719 if (edp_have_panel_vdd(intel_dp))
1720 return need_to_disable;
1721
1722 power_domain = intel_display_port_power_domain(intel_encoder);
1723 intel_display_power_get(dev_priv, power_domain);
1724
1725 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1726 port_name(intel_dig_port->port));
1727
1728 if (!edp_have_panel_power(intel_dp))
1729 wait_panel_power_cycle(intel_dp);
1730
1731 pp = ironlake_get_pp_control(intel_dp);
1732 pp |= EDP_FORCE_VDD;
1733
1734 pp_stat_reg = _pp_stat_reg(intel_dp);
1735 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1736
1737 I915_WRITE(pp_ctrl_reg, pp);
1738 POSTING_READ(pp_ctrl_reg);
1739 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1740 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1741 /*
1742 * If the panel wasn't on, delay before accessing aux channel
1743 */
1744 if (!edp_have_panel_power(intel_dp)) {
1745 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1746 port_name(intel_dig_port->port));
1747 msleep(intel_dp->panel_power_up_delay);
1748 }
1749
1750 return need_to_disable;
1751 }
1752
1753 /*
1754 * Must be paired with intel_edp_panel_vdd_off() or
1755 * intel_edp_panel_off().
1756 * Nested calls to these functions are not allowed since
1757 * we drop the lock. Caller must use some higher level
1758 * locking to prevent nested calls from other threads.
1759 */
1760 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1761 {
1762 bool vdd;
1763
1764 if (!is_edp(intel_dp))
1765 return;
1766
1767 pps_lock(intel_dp);
1768 vdd = edp_panel_vdd_on(intel_dp);
1769 pps_unlock(intel_dp);
1770
1771 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1772 port_name(dp_to_dig_port(intel_dp)->port));
1773 }
1774
1775 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1776 {
1777 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1778 struct drm_i915_private *dev_priv = dev->dev_private;
1779 struct intel_digital_port *intel_dig_port =
1780 dp_to_dig_port(intel_dp);
1781 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1782 enum intel_display_power_domain power_domain;
1783 u32 pp;
1784 u32 pp_stat_reg, pp_ctrl_reg;
1785
1786 lockdep_assert_held(&dev_priv->pps_mutex);
1787
1788 WARN_ON(intel_dp->want_panel_vdd);
1789
1790 if (!edp_have_panel_vdd(intel_dp))
1791 return;
1792
1793 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1794 port_name(intel_dig_port->port));
1795
1796 pp = ironlake_get_pp_control(intel_dp);
1797 pp &= ~EDP_FORCE_VDD;
1798
1799 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1800 pp_stat_reg = _pp_stat_reg(intel_dp);
1801
1802 I915_WRITE(pp_ctrl_reg, pp);
1803 POSTING_READ(pp_ctrl_reg);
1804
1805 /* Make sure sequencer is idle before allowing subsequent activity */
1806 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1807 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1808
1809 if ((pp & POWER_TARGET_ON) == 0)
1810 intel_dp->last_power_cycle = jiffies;
1811
1812 power_domain = intel_display_port_power_domain(intel_encoder);
1813 intel_display_power_put(dev_priv, power_domain);
1814 }
1815
1816 static void edp_panel_vdd_work(struct work_struct *__work)
1817 {
1818 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1819 struct intel_dp, panel_vdd_work);
1820
1821 pps_lock(intel_dp);
1822 if (!intel_dp->want_panel_vdd)
1823 edp_panel_vdd_off_sync(intel_dp);
1824 pps_unlock(intel_dp);
1825 }
1826
1827 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1828 {
1829 unsigned long delay;
1830
1831 /*
1832 * Queue the timer to fire a long time from now (relative to the power
1833 * down delay) to keep the panel power up across a sequence of
1834 * operations.
1835 */
1836 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1837 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1838 }
1839
1840 /*
1841 * Must be paired with edp_panel_vdd_on().
1842 * Must hold pps_mutex around the whole on/off sequence.
1843 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1844 */
1845 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1846 {
1847 struct drm_i915_private *dev_priv =
1848 intel_dp_to_dev(intel_dp)->dev_private;
1849
1850 lockdep_assert_held(&dev_priv->pps_mutex);
1851
1852 if (!is_edp(intel_dp))
1853 return;
1854
1855 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1856 port_name(dp_to_dig_port(intel_dp)->port));
1857
1858 intel_dp->want_panel_vdd = false;
1859
1860 if (sync)
1861 edp_panel_vdd_off_sync(intel_dp);
1862 else
1863 edp_panel_vdd_schedule_off(intel_dp);
1864 }
1865
1866 static void edp_panel_on(struct intel_dp *intel_dp)
1867 {
1868 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1869 struct drm_i915_private *dev_priv = dev->dev_private;
1870 u32 pp;
1871 u32 pp_ctrl_reg;
1872
1873 lockdep_assert_held(&dev_priv->pps_mutex);
1874
1875 if (!is_edp(intel_dp))
1876 return;
1877
1878 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1879 port_name(dp_to_dig_port(intel_dp)->port));
1880
1881 if (WARN(edp_have_panel_power(intel_dp),
1882 "eDP port %c panel power already on\n",
1883 port_name(dp_to_dig_port(intel_dp)->port)))
1884 return;
1885
1886 wait_panel_power_cycle(intel_dp);
1887
1888 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1889 pp = ironlake_get_pp_control(intel_dp);
1890 if (IS_GEN5(dev)) {
1891 /* ILK workaround: disable reset around power sequence */
1892 pp &= ~PANEL_POWER_RESET;
1893 I915_WRITE(pp_ctrl_reg, pp);
1894 POSTING_READ(pp_ctrl_reg);
1895 }
1896
1897 pp |= POWER_TARGET_ON;
1898 if (!IS_GEN5(dev))
1899 pp |= PANEL_POWER_RESET;
1900
1901 I915_WRITE(pp_ctrl_reg, pp);
1902 POSTING_READ(pp_ctrl_reg);
1903
1904 wait_panel_on(intel_dp);
1905 intel_dp->last_power_on = jiffies;
1906
1907 if (IS_GEN5(dev)) {
1908 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1909 I915_WRITE(pp_ctrl_reg, pp);
1910 POSTING_READ(pp_ctrl_reg);
1911 }
1912 }
1913
1914 void intel_edp_panel_on(struct intel_dp *intel_dp)
1915 {
1916 if (!is_edp(intel_dp))
1917 return;
1918
1919 pps_lock(intel_dp);
1920 edp_panel_on(intel_dp);
1921 pps_unlock(intel_dp);
1922 }
1923
1924
1925 static void edp_panel_off(struct intel_dp *intel_dp)
1926 {
1927 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1928 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1929 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1930 struct drm_i915_private *dev_priv = dev->dev_private;
1931 enum intel_display_power_domain power_domain;
1932 u32 pp;
1933 u32 pp_ctrl_reg;
1934
1935 lockdep_assert_held(&dev_priv->pps_mutex);
1936
1937 if (!is_edp(intel_dp))
1938 return;
1939
1940 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1941 port_name(dp_to_dig_port(intel_dp)->port));
1942
1943 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1944 port_name(dp_to_dig_port(intel_dp)->port));
1945
1946 pp = ironlake_get_pp_control(intel_dp);
1947 /* We need to switch off panel power _and_ force vdd, because otherwise
1948 * some panels get very unhappy and cease to work. */
1949 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1950 EDP_BLC_ENABLE);
1951
1952 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1953
1954 intel_dp->want_panel_vdd = false;
1955
1956 I915_WRITE(pp_ctrl_reg, pp);
1957 POSTING_READ(pp_ctrl_reg);
1958
1959 intel_dp->last_power_cycle = jiffies;
1960 wait_panel_off(intel_dp);
1961
1962 /* We got a reference when we enabled the VDD. */
1963 power_domain = intel_display_port_power_domain(intel_encoder);
1964 intel_display_power_put(dev_priv, power_domain);
1965 }
1966
1967 void intel_edp_panel_off(struct intel_dp *intel_dp)
1968 {
1969 if (!is_edp(intel_dp))
1970 return;
1971
1972 pps_lock(intel_dp);
1973 edp_panel_off(intel_dp);
1974 pps_unlock(intel_dp);
1975 }
1976
1977 /* Enable backlight in the panel power control. */
1978 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1979 {
1980 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1981 struct drm_device *dev = intel_dig_port->base.base.dev;
1982 struct drm_i915_private *dev_priv = dev->dev_private;
1983 u32 pp;
1984 u32 pp_ctrl_reg;
1985
1986 /*
1987 * If we enable the backlight right away following a panel power
1988 * on, we may see slight flicker as the panel syncs with the eDP
1989 * link. So delay a bit to make sure the image is solid before
1990 * allowing it to appear.
1991 */
1992 wait_backlight_on(intel_dp);
1993
1994 pps_lock(intel_dp);
1995
1996 pp = ironlake_get_pp_control(intel_dp);
1997 pp |= EDP_BLC_ENABLE;
1998
1999 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2000
2001 I915_WRITE(pp_ctrl_reg, pp);
2002 POSTING_READ(pp_ctrl_reg);
2003
2004 pps_unlock(intel_dp);
2005 }
2006
2007 /* Enable backlight PWM and backlight PP control. */
2008 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2009 {
2010 if (!is_edp(intel_dp))
2011 return;
2012
2013 DRM_DEBUG_KMS("\n");
2014
2015 intel_panel_enable_backlight(intel_dp->attached_connector);
2016 _intel_edp_backlight_on(intel_dp);
2017 }
2018
2019 /* Disable backlight in the panel power control. */
2020 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2021 {
2022 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2023 struct drm_i915_private *dev_priv = dev->dev_private;
2024 u32 pp;
2025 u32 pp_ctrl_reg;
2026
2027 if (!is_edp(intel_dp))
2028 return;
2029
2030 pps_lock(intel_dp);
2031
2032 pp = ironlake_get_pp_control(intel_dp);
2033 pp &= ~EDP_BLC_ENABLE;
2034
2035 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2036
2037 I915_WRITE(pp_ctrl_reg, pp);
2038 POSTING_READ(pp_ctrl_reg);
2039
2040 pps_unlock(intel_dp);
2041
2042 intel_dp->last_backlight_off = jiffies;
2043 edp_wait_backlight_off(intel_dp);
2044 }
2045
2046 /* Disable backlight PP control and backlight PWM. */
2047 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2048 {
2049 if (!is_edp(intel_dp))
2050 return;
2051
2052 DRM_DEBUG_KMS("\n");
2053
2054 _intel_edp_backlight_off(intel_dp);
2055 intel_panel_disable_backlight(intel_dp->attached_connector);
2056 }
2057
2058 /*
2059 * Hook for controlling the panel power control backlight through the bl_power
2060 * sysfs attribute. Take care to handle multiple calls.
2061 */
2062 static void intel_edp_backlight_power(struct intel_connector *connector,
2063 bool enable)
2064 {
2065 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2066 bool is_enabled;
2067
2068 pps_lock(intel_dp);
2069 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2070 pps_unlock(intel_dp);
2071
2072 if (is_enabled == enable)
2073 return;
2074
2075 DRM_DEBUG_KMS("panel power control backlight %s\n",
2076 enable ? "enable" : "disable");
2077
2078 if (enable)
2079 _intel_edp_backlight_on(intel_dp);
2080 else
2081 _intel_edp_backlight_off(intel_dp);
2082 }
2083
2084 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2085 {
2086 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2087 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2088 struct drm_device *dev = crtc->dev;
2089 struct drm_i915_private *dev_priv = dev->dev_private;
2090 u32 dpa_ctl;
2091
2092 assert_pipe_disabled(dev_priv,
2093 to_intel_crtc(crtc)->pipe);
2094
2095 DRM_DEBUG_KMS("\n");
2096 dpa_ctl = I915_READ(DP_A);
2097 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2098 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2099
2100 /* We don't adjust intel_dp->DP while tearing down the link, to
2101 * facilitate link retraining (e.g. after hotplug). Hence clear all
2102 * enable bits here to ensure that we don't enable too much. */
2103 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2104 intel_dp->DP |= DP_PLL_ENABLE;
2105 I915_WRITE(DP_A, intel_dp->DP);
2106 POSTING_READ(DP_A);
2107 udelay(200);
2108 }
2109
2110 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2111 {
2112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2113 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2114 struct drm_device *dev = crtc->dev;
2115 struct drm_i915_private *dev_priv = dev->dev_private;
2116 u32 dpa_ctl;
2117
2118 assert_pipe_disabled(dev_priv,
2119 to_intel_crtc(crtc)->pipe);
2120
2121 dpa_ctl = I915_READ(DP_A);
2122 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2123 "dp pll off, should be on\n");
2124 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2125
2126 /* We can't rely on the value tracked for the DP register in
2127 * intel_dp->DP because link_down must not change that (otherwise link
2128 * re-training will fail). */
2129 dpa_ctl &= ~DP_PLL_ENABLE;
2130 I915_WRITE(DP_A, dpa_ctl);
2131 POSTING_READ(DP_A);
2132 udelay(200);
2133 }
2134
2135 /* If the sink supports it, try to set the power state appropriately */
2136 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2137 {
2138 int ret, i;
2139
2140 /* Should have a valid DPCD by this point */
2141 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2142 return;
2143
2144 if (mode != DRM_MODE_DPMS_ON) {
2145 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2146 DP_SET_POWER_D3);
2147 } else {
2148 /*
2149 * When turning on, retry the write a few times, with 1 ms between
2150 * attempts, to give the sink time to wake up.
2151 */
2152 for (i = 0; i < 3; i++) {
2153 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2154 DP_SET_POWER_D0);
2155 if (ret == 1)
2156 break;
2157 msleep(1);
2158 }
2159 }
2160
2161 if (ret != 1)
2162 DRM_DEBUG_KMS("failed to %s sink power state\n",
2163 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2164 }
2165
2166 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2167 enum pipe *pipe)
2168 {
2169 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2170 enum port port = dp_to_dig_port(intel_dp)->port;
2171 struct drm_device *dev = encoder->base.dev;
2172 struct drm_i915_private *dev_priv = dev->dev_private;
2173 enum intel_display_power_domain power_domain;
2174 u32 tmp;
2175
2176 power_domain = intel_display_port_power_domain(encoder);
2177 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2178 return false;
2179
2180 tmp = I915_READ(intel_dp->output_reg);
2181
2182 if (!(tmp & DP_PORT_EN))
2183 return false;
2184
2185 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2186 *pipe = PORT_TO_PIPE_CPT(tmp);
2187 } else if (IS_CHERRYVIEW(dev)) {
2188 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2189 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2190 *pipe = PORT_TO_PIPE(tmp);
2191 } else {
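/*
 * On PCH-CPT the pipe <-> port mapping lives in the transcoder DP
 * control registers, so scan them for our port.
 */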
2192 u32 trans_sel;
2193 u32 trans_dp;
2194 int i;
2195
2196 switch (intel_dp->output_reg) {
2197 case PCH_DP_B:
2198 trans_sel = TRANS_DP_PORT_SEL_B;
2199 break;
2200 case PCH_DP_C:
2201 trans_sel = TRANS_DP_PORT_SEL_C;
2202 break;
2203 case PCH_DP_D:
2204 trans_sel = TRANS_DP_PORT_SEL_D;
2205 break;
2206 default:
2207 return true;
2208 }
2209
2210 for_each_pipe(dev_priv, i) {
2211 trans_dp = I915_READ(TRANS_DP_CTL(i));
2212 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2213 *pipe = i;
2214 return true;
2215 }
2216 }
2217
2218 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2219 intel_dp->output_reg);
2220 }
2221
2222 return true;
2223 }
2224
2225 static void intel_dp_get_config(struct intel_encoder *encoder,
2226 struct intel_crtc_state *pipe_config)
2227 {
2228 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2229 u32 tmp, flags = 0;
2230 struct drm_device *dev = encoder->base.dev;
2231 struct drm_i915_private *dev_priv = dev->dev_private;
2232 enum port port = dp_to_dig_port(intel_dp)->port;
2233 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2234 int dotclock;
2235
2236 tmp = I915_READ(intel_dp->output_reg);
2237 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2238 pipe_config->has_audio = true;
2239
2240 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2241 if (tmp & DP_SYNC_HS_HIGH)
2242 flags |= DRM_MODE_FLAG_PHSYNC;
2243 else
2244 flags |= DRM_MODE_FLAG_NHSYNC;
2245
2246 if (tmp & DP_SYNC_VS_HIGH)
2247 flags |= DRM_MODE_FLAG_PVSYNC;
2248 else
2249 flags |= DRM_MODE_FLAG_NVSYNC;
2250 } else {
2251 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2252 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2253 flags |= DRM_MODE_FLAG_PHSYNC;
2254 else
2255 flags |= DRM_MODE_FLAG_NHSYNC;
2256
2257 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2258 flags |= DRM_MODE_FLAG_PVSYNC;
2259 else
2260 flags |= DRM_MODE_FLAG_NVSYNC;
2261 }
2262
2263 pipe_config->base.adjusted_mode.flags |= flags;
2264
2265 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2266 tmp & DP_COLOR_RANGE_16_235)
2267 pipe_config->limited_color_range = true;
2268
2269 pipe_config->has_dp_encoder = true;
2270
2271 intel_dp_get_m_n(crtc, pipe_config);
2272
2273 if (port == PORT_A) {
2274 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2275 pipe_config->port_clock = 162000;
2276 else
2277 pipe_config->port_clock = 270000;
2278 }
2279
2280 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2281 &pipe_config->dp_m_n);
2282
2283 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2284 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2285
2286 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2287
2288 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2289 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2290 /*
2291 * This is a big fat ugly hack.
2292 *
2293 * Some machines in UEFI boot mode provide us with a VBT that has 18
2294 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2295 * unknown we fail to light up. Yet the same BIOS boots up with
2296 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2297 * max, not what it tells us to use.
2298 *
2299 * Note: This will still be broken if the eDP panel is not lit
2300 * up by the BIOS, and thus we can't get the mode at module
2301 * load.
2302 */
2303 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2304 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2305 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2306 }
2307 }
2308
2309 static void intel_disable_dp(struct intel_encoder *encoder)
2310 {
2311 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2312 struct drm_device *dev = encoder->base.dev;
2313 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2314
2315 if (crtc->config->has_audio)
2316 intel_audio_codec_disable(encoder);
2317
2318 if (HAS_PSR(dev) && !HAS_DDI(dev))
2319 intel_psr_disable(intel_dp);
2320
2321 /* Make sure the panel is off before trying to change the mode. But also
2322 * ensure that we have vdd while we switch off the panel. */
2323 intel_edp_panel_vdd_on(intel_dp);
2324 intel_edp_backlight_off(intel_dp);
2325 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2326 intel_edp_panel_off(intel_dp);
2327
2328 /* disable the port before the pipe on g4x */
2329 if (INTEL_INFO(dev)->gen < 5)
2330 intel_dp_link_down(intel_dp);
2331 }
2332
2333 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2334 {
2335 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2336 enum port port = dp_to_dig_port(intel_dp)->port;
2337
2338 intel_dp_link_down(intel_dp);
2339 if (port == PORT_A)
2340 ironlake_edp_pll_off(intel_dp);
2341 }
2342
2343 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2344 {
2345 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2346
2347 intel_dp_link_down(intel_dp);
2348 }
2349
2350 static void chv_post_disable_dp(struct intel_encoder *encoder)
2351 {
2352 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2353 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2354 struct drm_device *dev = encoder->base.dev;
2355 struct drm_i915_private *dev_priv = dev->dev_private;
2356 struct intel_crtc *intel_crtc =
2357 to_intel_crtc(encoder->base.crtc);
2358 enum dpio_channel ch = vlv_dport_to_channel(dport);
2359 enum pipe pipe = intel_crtc->pipe;
2360 u32 val;
2361
2362 intel_dp_link_down(intel_dp);
2363
2364 mutex_lock(&dev_priv->dpio_lock);
2365
2366 /* Propagate soft reset to data lane reset */
2367 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2368 val |= CHV_PCS_REQ_SOFTRESET_EN;
2369 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2370
2371 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2372 val |= CHV_PCS_REQ_SOFTRESET_EN;
2373 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2374
2375 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2376 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2377 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2378
2379 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2380 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2381 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2382
2383 mutex_unlock(&dev_priv->dpio_lock);
2384 }
2385
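/*
 * Select the requested training pattern in hardware. Three register
 * layouts exist: HSW+ DDI programs the pattern via DP_TP_CTL, CPT PCH
 * ports use the _CPT link train bits in the port register, and
 * everything else uses the original g4x bits.
 */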
2386 static void
2387 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2388 uint32_t *DP,
2389 uint8_t dp_train_pat)
2390 {
2391 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2392 struct drm_device *dev = intel_dig_port->base.base.dev;
2393 struct drm_i915_private *dev_priv = dev->dev_private;
2394 enum port port = intel_dig_port->port;
2395
2396 if (HAS_DDI(dev)) {
2397 uint32_t temp = I915_READ(DP_TP_CTL(port));
2398
2399 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2400 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2401 else
2402 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2403
2404 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2405 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2406 case DP_TRAINING_PATTERN_DISABLE:
2407 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2408
2409 break;
2410 case DP_TRAINING_PATTERN_1:
2411 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2412 break;
2413 case DP_TRAINING_PATTERN_2:
2414 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2415 break;
2416 case DP_TRAINING_PATTERN_3:
2417 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2418 break;
2419 }
2420 I915_WRITE(DP_TP_CTL(port), temp);
2421
2422 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2423 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2424
2425 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2426 case DP_TRAINING_PATTERN_DISABLE:
2427 *DP |= DP_LINK_TRAIN_OFF_CPT;
2428 break;
2429 case DP_TRAINING_PATTERN_1:
2430 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2431 break;
2432 case DP_TRAINING_PATTERN_2:
2433 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2434 break;
2435 case DP_TRAINING_PATTERN_3:
2436 DRM_ERROR("DP training pattern 3 not supported\n");
2437 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2438 break;
2439 }
2440
2441 } else {
2442 if (IS_CHERRYVIEW(dev))
2443 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2444 else
2445 *DP &= ~DP_LINK_TRAIN_MASK;
2446
2447 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2448 case DP_TRAINING_PATTERN_DISABLE:
2449 *DP |= DP_LINK_TRAIN_OFF;
2450 break;
2451 case DP_TRAINING_PATTERN_1:
2452 *DP |= DP_LINK_TRAIN_PAT_1;
2453 break;
2454 case DP_TRAINING_PATTERN_2:
2455 *DP |= DP_LINK_TRAIN_PAT_2;
2456 break;
2457 case DP_TRAINING_PATTERN_3:
2458 if (IS_CHERRYVIEW(dev)) {
2459 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2460 } else {
2461 DRM_ERROR("DP training pattern 3 not supported\n");
2462 *DP |= DP_LINK_TRAIN_PAT_2;
2463 }
2464 break;
2465 }
2466 }
2467 }
2468
2469 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2470 {
2471 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473
2474 /* enable with pattern 1 (as per spec) */
2475 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2476 DP_TRAINING_PATTERN_1);
2477
2478 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2479 POSTING_READ(intel_dp->output_reg);
2480
2481 /*
2482 * Magic for VLV/CHV. We _must_ first set up the register
2483 * without actually enabling the port, and then do another
2484 * write to enable the port. Otherwise link training will
2485 * fail when the power sequencer is freshly used for this port.
2486 */
2487 intel_dp->DP |= DP_PORT_EN;
2488
2489 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2490 POSTING_READ(intel_dp->output_reg);
2491 }
2492
2493 static void intel_enable_dp(struct intel_encoder *encoder)
2494 {
2495 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2496 struct drm_device *dev = encoder->base.dev;
2497 struct drm_i915_private *dev_priv = dev->dev_private;
2498 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2499 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2500 unsigned int lane_mask = 0x0;
2501
2502 if (WARN_ON(dp_reg & DP_PORT_EN))
2503 return;
2504
2505 pps_lock(intel_dp);
2506
2507 if (IS_VALLEYVIEW(dev))
2508 vlv_init_panel_power_sequencer(intel_dp);
2509
2510 intel_dp_enable_port(intel_dp);
2511
2512 edp_panel_vdd_on(intel_dp);
2513 edp_panel_on(intel_dp);
2514 edp_panel_vdd_off(intel_dp, true);
2515
2516 pps_unlock(intel_dp);
2517
2518 if (IS_VALLEYVIEW(dev))
2519 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2520 lane_mask);
2521
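/*
 * Standard DP bring-up: wake the sink, run clock recovery and channel
 * equalization, then drop out of the training pattern.
 */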
2522 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2523 intel_dp_start_link_train(intel_dp);
2524 intel_dp_complete_link_train(intel_dp);
2525 intel_dp_stop_link_train(intel_dp);
2526
2527 if (crtc->config->has_audio) {
2528 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2529 pipe_name(crtc->pipe));
2530 intel_audio_codec_enable(encoder);
2531 }
2532 }
2533
2534 static void g4x_enable_dp(struct intel_encoder *encoder)
2535 {
2536 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2537
2538 intel_enable_dp(encoder);
2539 intel_edp_backlight_on(intel_dp);
2540 }
2541
2542 static void vlv_enable_dp(struct intel_encoder *encoder)
2543 {
2544 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2545
2546 intel_edp_backlight_on(intel_dp);
2547 intel_psr_enable(intel_dp);
2548 }
2549
2550 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2551 {
2552 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2553 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2554
2555 intel_dp_prepare(encoder);
2556
2557 /* Only ilk+ has port A */
2558 if (dport->port == PORT_A) {
2559 ironlake_set_pll_cpu_edp(intel_dp);
2560 ironlake_edp_pll_on(intel_dp);
2561 }
2562 }
2563
2564 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2565 {
2566 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2567 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2568 enum pipe pipe = intel_dp->pps_pipe;
2569 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2570
2571 edp_panel_vdd_off_sync(intel_dp);
2572
2573 /*
2574 * VLV seems to get confused when multiple power sequencers
2575 * have the same port selected (even if only one has power/vdd
2576 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2577 * CHV, on the other hand, doesn't seem to mind having the same port
2578 * selected in multiple power sequencers, but let's always clear the
2579 * port select when logically disconnecting a power sequencer
2580 * from a port.
2581 */
2582 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2583 pipe_name(pipe), port_name(intel_dig_port->port));
2584 I915_WRITE(pp_on_reg, 0);
2585 POSTING_READ(pp_on_reg);
2586
2587 intel_dp->pps_pipe = INVALID_PIPE;
2588 }
2589
2590 static void vlv_steal_power_sequencer(struct drm_device *dev,
2591 enum pipe pipe)
2592 {
2593 struct drm_i915_private *dev_priv = dev->dev_private;
2594 struct intel_encoder *encoder;
2595
2596 lockdep_assert_held(&dev_priv->pps_mutex);
2597
2598 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2599 return;
2600
2601 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2602 base.head) {
2603 struct intel_dp *intel_dp;
2604 enum port port;
2605
2606 if (encoder->type != INTEL_OUTPUT_EDP)
2607 continue;
2608
2609 intel_dp = enc_to_intel_dp(&encoder->base);
2610 port = dp_to_dig_port(intel_dp)->port;
2611
2612 if (intel_dp->pps_pipe != pipe)
2613 continue;
2614
2615 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2616 pipe_name(pipe), port_name(port));
2617
2618 WARN(encoder->connectors_active,
2619 "stealing pipe %c power sequencer from active eDP port %c\n",
2620 pipe_name(pipe), port_name(port));
2621
2622 /* make sure vdd is off before we steal it */
2623 vlv_detach_power_sequencer(intel_dp);
2624 }
2625 }
2626
2627 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2628 {
2629 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2630 struct intel_encoder *encoder = &intel_dig_port->base;
2631 struct drm_device *dev = encoder->base.dev;
2632 struct drm_i915_private *dev_priv = dev->dev_private;
2633 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2634
2635 lockdep_assert_held(&dev_priv->pps_mutex);
2636
2637 if (!is_edp(intel_dp))
2638 return;
2639
2640 if (intel_dp->pps_pipe == crtc->pipe)
2641 return;
2642
2643 /*
2644 * If another power sequencer was being used on this
2645 * port previously make sure to turn off vdd there while
2646 * we still have control of it.
2647 */
2648 if (intel_dp->pps_pipe != INVALID_PIPE)
2649 vlv_detach_power_sequencer(intel_dp);
2650
2651 /* We may be stealing the power sequencer from another port. */
2655 vlv_steal_power_sequencer(dev, crtc->pipe);
2656
2657 /* now it's all ours */
2658 intel_dp->pps_pipe = crtc->pipe;
2659
2660 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2661 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2662
2663 /* init power sequencer on this pipe and port */
2664 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2665 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2666 }
2667
2668 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2669 {
2670 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2671 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2672 struct drm_device *dev = encoder->base.dev;
2673 struct drm_i915_private *dev_priv = dev->dev_private;
2674 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2675 enum dpio_channel port = vlv_dport_to_channel(dport);
2676 int pipe = intel_crtc->pipe;
2677 u32 val;
2678
2679 mutex_lock(&dev_priv->dpio_lock);
2680
2681 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
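/* Note: the value just read is discarded; val is rebuilt from scratch. */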
2682 val = 0;
2683 if (pipe)
2684 val |= (1<<21);
2685 else
2686 val &= ~(1<<21);
2687 val |= 0x001000c4;
2688 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2690 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2691
2692 mutex_unlock(&dev_priv->dpio_lock);
2693
2694 intel_enable_dp(encoder);
2695 }
2696
2697 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2698 {
2699 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2700 struct drm_device *dev = encoder->base.dev;
2701 struct drm_i915_private *dev_priv = dev->dev_private;
2702 struct intel_crtc *intel_crtc =
2703 to_intel_crtc(encoder->base.crtc);
2704 enum dpio_channel port = vlv_dport_to_channel(dport);
2705 int pipe = intel_crtc->pipe;
2706
2707 intel_dp_prepare(encoder);
2708
2709 /* Program Tx lane resets to default */
2710 mutex_lock(&dev_priv->dpio_lock);
2711 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2712 DPIO_PCS_TX_LANE2_RESET |
2713 DPIO_PCS_TX_LANE1_RESET);
2714 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2715 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2716 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2717 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2718 DPIO_PCS_CLK_SOFT_RESET);
2719
2720 /* Fix up inter-pair skew failure */
2721 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2722 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2723 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2724 mutex_unlock(&dev_priv->dpio_lock);
2725 }
2726
2727 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2728 {
2729 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2730 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2731 struct drm_device *dev = encoder->base.dev;
2732 struct drm_i915_private *dev_priv = dev->dev_private;
2733 struct intel_crtc *intel_crtc =
2734 to_intel_crtc(encoder->base.crtc);
2735 enum dpio_channel ch = vlv_dport_to_channel(dport);
2736 int pipe = intel_crtc->pipe;
2737 int data, i, stagger;
2738 u32 val;
2739
2740 mutex_lock(&dev_priv->dpio_lock);
2741
2742 /* allow hardware to manage TX FIFO reset source */
2743 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2744 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2745 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2746
2747 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2748 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2749 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2750
2751 /* Deassert soft data lane reset */
2752 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2753 val |= CHV_PCS_REQ_SOFTRESET_EN;
2754 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2755
2756 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2757 val |= CHV_PCS_REQ_SOFTRESET_EN;
2758 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2759
2760 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2761 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2762 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2763
2764 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2765 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2766 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2767
2768 /* Program Tx lane latency optimal settings */
2769 for (i = 0; i < 4; i++) {
2770 /* Set the upar bit */
2771 data = (i == 1) ? 0x0 : 0x1;
2772 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2773 data << DPIO_UPAR_SHIFT);
2774 }
2775
2776 /* Data lane stagger programming */
2777 if (intel_crtc->config->port_clock > 270000)
2778 stagger = 0x18;
2779 else if (intel_crtc->config->port_clock > 135000)
2780 stagger = 0xd;
2781 else if (intel_crtc->config->port_clock > 67500)
2782 stagger = 0x7;
2783 else if (intel_crtc->config->port_clock > 33750)
2784 stagger = 0x4;
2785 else
2786 stagger = 0x2;
2787
2788 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2789 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2790 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2791
2792 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2793 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2794 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2795
2796 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2797 DPIO_LANESTAGGER_STRAP(stagger) |
2798 DPIO_LANESTAGGER_STRAP_OVRD |
2799 DPIO_TX1_STAGGER_MASK(0x1f) |
2800 DPIO_TX1_STAGGER_MULT(6) |
2801 DPIO_TX2_STAGGER_MULT(0));
2802
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2804 DPIO_LANESTAGGER_STRAP(stagger) |
2805 DPIO_LANESTAGGER_STRAP_OVRD |
2806 DPIO_TX1_STAGGER_MASK(0x1f) |
2807 DPIO_TX1_STAGGER_MULT(7) |
2808 DPIO_TX2_STAGGER_MULT(5));
2809
2810 mutex_unlock(&dev_priv->dpio_lock);
2811
2812 intel_enable_dp(encoder);
2813 }
2814
2815 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2816 {
2817 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2818 struct drm_device *dev = encoder->base.dev;
2819 struct drm_i915_private *dev_priv = dev->dev_private;
2820 struct intel_crtc *intel_crtc =
2821 to_intel_crtc(encoder->base.crtc);
2822 enum dpio_channel ch = vlv_dport_to_channel(dport);
2823 enum pipe pipe = intel_crtc->pipe;
2824 u32 val;
2825
2826 intel_dp_prepare(encoder);
2827
2828 mutex_lock(&dev_priv->dpio_lock);
2829
2830 /* program left/right clock distribution */
2831 if (pipe != PIPE_B) {
2832 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2833 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2834 if (ch == DPIO_CH0)
2835 val |= CHV_BUFLEFTENA1_FORCE;
2836 if (ch == DPIO_CH1)
2837 val |= CHV_BUFRIGHTENA1_FORCE;
2838 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2839 } else {
2840 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2841 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2842 if (ch == DPIO_CH0)
2843 val |= CHV_BUFLEFTENA2_FORCE;
2844 if (ch == DPIO_CH1)
2845 val |= CHV_BUFRIGHTENA2_FORCE;
2846 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2847 }
2848
2849 /* program clock channel usage */
2850 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2851 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2852 if (pipe != PIPE_B)
2853 val &= ~CHV_PCS_USEDCLKCHANNEL;
2854 else
2855 val |= CHV_PCS_USEDCLKCHANNEL;
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2857
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2859 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2860 if (pipe != PIPE_B)
2861 val &= ~CHV_PCS_USEDCLKCHANNEL;
2862 else
2863 val |= CHV_PCS_USEDCLKCHANNEL;
2864 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2865
2866 /*
2867 * This is a bit weird since generally CL
2868 * matches the pipe, but here we need to
2869 * pick the CL based on the port.
2870 */
2871 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2872 if (pipe != PIPE_B)
2873 val &= ~CHV_CMN_USEDCLKCHANNEL;
2874 else
2875 val |= CHV_CMN_USEDCLKCHANNEL;
2876 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2877
2878 mutex_unlock(&dev_priv->dpio_lock);
2879 }
2880
2881 /*
2882 * Native read with retry for link status and receiver capability reads for
2883 * cases where the sink may still be asleep.
2884 *
2885 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2886 * supposed to retry 3 times per the spec.
2887 */
2888 static ssize_t
2889 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2890 void *buffer, size_t size)
2891 {
2892 ssize_t ret;
2893 int i;
2894
2895 /*
2896 * Sometimes we just get the same incorrect byte repeated
2897 * over the entire buffer. Doing just one throw-away read
2898 * initially seems to "solve" it.
2899 */
2900 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2901
2902 for (i = 0; i < 3; i++) {
2903 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2904 if (ret == size)
2905 return ret;
2906 msleep(1);
2907 }
2908
2909 return ret;
2910 }
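/*
 * Usage sketch: reading the receiver capability block with wake retries
 * (DP_DPCD_REV is the start of the capability area):
 *
 *	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
 *
 *	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV, dpcd,
 *				    sizeof(dpcd)) != sizeof(dpcd))
 *		return false;
 */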
2911
2912 /*
2913 * Fetch AUX CH registers 0x202 - 0x207 which contain
2914 * link status information
2915 */
2916 static bool
2917 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2918 {
2919 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2920 DP_LANE0_1_STATUS,
2921 link_status,
2922 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2923 }
2924
2925 /* These are source-specific values. */
2926 static uint8_t
2927 intel_dp_voltage_max(struct intel_dp *intel_dp)
2928 {
2929 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2930 struct drm_i915_private *dev_priv = dev->dev_private;
2931 enum port port = dp_to_dig_port(intel_dp)->port;
2932
2933 if (IS_BROXTON(dev))
2934 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2935 else if (INTEL_INFO(dev)->gen >= 9) {
2936 if (dev_priv->edp_low_vswing && port == PORT_A)
2937 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2938 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2939 } else if (IS_VALLEYVIEW(dev))
2940 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2941 else if (IS_GEN7(dev) && port == PORT_A)
2942 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2943 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2944 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2945 else
2946 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2947 }
2948
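/*
 * Max pre-emphasis allowed for a given vswing: the higher the requested
 * swing, the less pre-emphasis headroom the source can offer, so the two
 * maxima trade off against each other in the per-platform tables below.
 */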
2949 static uint8_t
2950 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2951 {
2952 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2953 enum port port = dp_to_dig_port(intel_dp)->port;
2954
2955 if (INTEL_INFO(dev)->gen >= 9) {
2956 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2958 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2959 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2960 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2961 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2962 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2964 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2965 default:
2966 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2967 }
2968 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2969 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2971 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2972 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2973 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2974 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2975 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2977 default:
2978 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2979 }
2980 } else if (IS_VALLEYVIEW(dev)) {
2981 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2982 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2983 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2985 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2986 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2987 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2989 default:
2990 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2991 }
2992 } else if (IS_GEN7(dev) && port == PORT_A) {
2993 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2994 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2995 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2996 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2997 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2998 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2999 default:
3000 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3001 }
3002 } else {
3003 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3005 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3007 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3009 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3011 default:
3012 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3013 }
3014 }
3015 }
3016
3017 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3018 {
3019 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3020 struct drm_i915_private *dev_priv = dev->dev_private;
3021 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3022 struct intel_crtc *intel_crtc =
3023 to_intel_crtc(dport->base.base.crtc);
3024 unsigned long demph_reg_value, preemph_reg_value,
3025 uniqtranscale_reg_value;
3026 uint8_t train_set = intel_dp->train_set[0];
3027 enum dpio_channel port = vlv_dport_to_channel(dport);
3028 int pipe = intel_crtc->pipe;
3029
3030 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3031 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3032 preemph_reg_value = 0x0004000;
3033 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3034 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3035 demph_reg_value = 0x2B405555;
3036 uniqtranscale_reg_value = 0x552AB83A;
3037 break;
3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3039 demph_reg_value = 0x2B404040;
3040 uniqtranscale_reg_value = 0x5548B83A;
3041 break;
3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3043 demph_reg_value = 0x2B245555;
3044 uniqtranscale_reg_value = 0x5560B83A;
3045 break;
3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3047 demph_reg_value = 0x2B405555;
3048 uniqtranscale_reg_value = 0x5598DA3A;
3049 break;
3050 default:
3051 return 0;
3052 }
3053 break;
3054 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3055 preemph_reg_value = 0x0002000;
3056 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3058 demph_reg_value = 0x2B404040;
3059 uniqtranscale_reg_value = 0x5552B83A;
3060 break;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3062 demph_reg_value = 0x2B404848;
3063 uniqtranscale_reg_value = 0x5580B83A;
3064 break;
3065 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3066 demph_reg_value = 0x2B404040;
3067 uniqtranscale_reg_value = 0x55ADDA3A;
3068 break;
3069 default:
3070 return 0;
3071 }
3072 break;
3073 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3074 preemph_reg_value = 0x0000000;
3075 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3077 demph_reg_value = 0x2B305555;
3078 uniqtranscale_reg_value = 0x5570B83A;
3079 break;
3080 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3081 demph_reg_value = 0x2B2B4040;
3082 uniqtranscale_reg_value = 0x55ADDA3A;
3083 break;
3084 default:
3085 return 0;
3086 }
3087 break;
3088 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3089 preemph_reg_value = 0x0006000;
3090 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3092 demph_reg_value = 0x1B405555;
3093 uniqtranscale_reg_value = 0x55ADDA3A;
3094 break;
3095 default:
3096 return 0;
3097 }
3098 break;
3099 default:
3100 return 0;
3101 }
3102
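/*
 * Commit the computed values via DPIO; TX_DW5 is cleared first and its
 * high bit set again last, presumably latching the update.
 */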
3103 mutex_lock(&dev_priv->dpio_lock);
3104 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3105 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3106 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3107 uniqtranscale_reg_value);
3108 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3109 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3110 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3111 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3112 mutex_unlock(&dev_priv->dpio_lock);
3113
3114 return 0;
3115 }
3116
3117 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3118 {
3119 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3120 struct drm_i915_private *dev_priv = dev->dev_private;
3121 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3122 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3123 u32 deemph_reg_value, margin_reg_value, val;
3124 uint8_t train_set = intel_dp->train_set[0];
3125 enum dpio_channel ch = vlv_dport_to_channel(dport);
3126 enum pipe pipe = intel_crtc->pipe;
3127 int i;
3128
3129 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3130 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3131 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3133 deemph_reg_value = 128;
3134 margin_reg_value = 52;
3135 break;
3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3137 deemph_reg_value = 128;
3138 margin_reg_value = 77;
3139 break;
3140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3141 deemph_reg_value = 128;
3142 margin_reg_value = 102;
3143 break;
3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3145 deemph_reg_value = 128;
3146 margin_reg_value = 154;
3147 /* FIXME extra to set for 1200 */
3148 break;
3149 default:
3150 return 0;
3151 }
3152 break;
3153 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3154 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3156 deemph_reg_value = 85;
3157 margin_reg_value = 78;
3158 break;
3159 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3160 deemph_reg_value = 85;
3161 margin_reg_value = 116;
3162 break;
3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3164 deemph_reg_value = 85;
3165 margin_reg_value = 154;
3166 break;
3167 default:
3168 return 0;
3169 }
3170 break;
3171 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3172 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3174 deemph_reg_value = 64;
3175 margin_reg_value = 104;
3176 break;
3177 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3178 deemph_reg_value = 64;
3179 margin_reg_value = 154;
3180 break;
3181 default:
3182 return 0;
3183 }
3184 break;
3185 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3186 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3187 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3188 deemph_reg_value = 43;
3189 margin_reg_value = 154;
3190 break;
3191 default:
3192 return 0;
3193 }
3194 break;
3195 default:
3196 return 0;
3197 }
3198
3199 mutex_lock(&dev_priv->dpio_lock);
3200
3201 /* Clear calc init */
3202 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3203 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3204 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3205 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3206 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3207
3208 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3209 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3210 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3211 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3212 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3213
3214 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3215 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3216 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3217 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3218
3219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3220 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3221 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3222 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3223
3224 /* Program swing deemph */
3225 for (i = 0; i < 4; i++) {
3226 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3227 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3228 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3229 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3230 }
3231
3232 /* Program swing margin */
3233 for (i = 0; i < 4; i++) {
3234 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3235 val &= ~DPIO_SWING_MARGIN000_MASK;
3236 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3237 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3238 }
3239
3240 /* Disable unique transition scale */
3241 for (i = 0; i < 4; i++) {
3242 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3243 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3244 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3245 }
3246
3247 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3248 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3249 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3250 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3251
3252 /*
3253 * The documentation says to set bit 27 for ch0 and bit 26
3254 * for ch1, which might be a typo in the doc.
3255 * For now, for this unique transition scale selection, set bit
3256 * 27 for both ch0 and ch1.
3257 */
3258 for (i = 0; i < 4; i++) {
3259 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3260 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3261 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3262 }
3263
3264 for (i = 0; i < 4; i++) {
3265 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3266 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3267 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3268 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3269 }
3270 }
3271
3272 /* Start swing calculation */
3273 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3274 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3275 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3276
3277 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3278 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3279 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3280
3281 /* LRC Bypass */
3282 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3283 val |= DPIO_LRC_BYPASS;
3284 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3285
3286 mutex_unlock(&dev_priv->dpio_lock);
3287
3288 return 0;
3289 }
3290
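/*
 * Compute the next train_set from the sink's adjustment requests: take
 * the highest voltage swing and pre-emphasis requested on any lane,
 * clamp both to the source's maxima, and drive every lane with that
 * common setting.
 */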
3291 static void
3292 intel_get_adjust_train(struct intel_dp *intel_dp,
3293 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3294 {
3295 uint8_t v = 0;
3296 uint8_t p = 0;
3297 int lane;
3298 uint8_t voltage_max;
3299 uint8_t preemph_max;
3300
3301 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3302 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3303 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3304
3305 if (this_v > v)
3306 v = this_v;
3307 if (this_p > p)
3308 p = this_p;
3309 }
3310
3311 voltage_max = intel_dp_voltage_max(intel_dp);
3312 if (v >= voltage_max)
3313 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3314
3315 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3316 if (p >= preemph_max)
3317 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3318
3319 for (lane = 0; lane < 4; lane++)
3320 intel_dp->train_set[lane] = v | p;
3321 }
3322
3323 static uint32_t
3324 gen4_signal_levels(uint8_t train_set)
3325 {
3326 uint32_t signal_levels = 0;
3327
3328 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3330 default:
3331 signal_levels |= DP_VOLTAGE_0_4;
3332 break;
3333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3334 signal_levels |= DP_VOLTAGE_0_6;
3335 break;
3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3337 signal_levels |= DP_VOLTAGE_0_8;
3338 break;
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3340 signal_levels |= DP_VOLTAGE_1_2;
3341 break;
3342 }
3343 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3344 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3345 default:
3346 signal_levels |= DP_PRE_EMPHASIS_0;
3347 break;
3348 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3349 signal_levels |= DP_PRE_EMPHASIS_3_5;
3350 break;
3351 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3352 signal_levels |= DP_PRE_EMPHASIS_6;
3353 break;
3354 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3355 signal_levels |= DP_PRE_EMPHASIS_9_5;
3356 break;
3357 }
3358 return signal_levels;
3359 }
3360
3361 /* Gen6's DP voltage swing and pre-emphasis control */
3362 static uint32_t
3363 gen6_edp_signal_levels(uint8_t train_set)
3364 {
3365 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3366 DP_TRAIN_PRE_EMPHASIS_MASK);
3367 switch (signal_levels) {
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3370 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3372 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3375 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3378 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3381 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3382 default:
3383 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3384 "0x%x\n", signal_levels);
3385 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3386 }
3387 }
3388
3389 /* Gen7's DP voltage swing and pre-emphasis control */
3390 static uint32_t
3391 gen7_edp_signal_levels(uint8_t train_set)
3392 {
3393 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3394 DP_TRAIN_PRE_EMPHASIS_MASK);
3395 switch (signal_levels) {
3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3397 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3399 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3401 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3402
3403 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3404 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3406 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3407
3408 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3409 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3410 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3411 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3412
3413 default:
3414 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3415 "0x%x\n", signal_levels);
3416 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3417 }
3418 }
3419
3420 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3421 static uint32_t
3422 hsw_signal_levels(uint8_t train_set)
3423 {
3424 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3425 DP_TRAIN_PRE_EMPHASIS_MASK);
3426 switch (signal_levels) {
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3428 return DDI_BUF_TRANS_SELECT(0);
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3430 return DDI_BUF_TRANS_SELECT(1);
3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3432 return DDI_BUF_TRANS_SELECT(2);
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3434 return DDI_BUF_TRANS_SELECT(3);
3435
3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3437 return DDI_BUF_TRANS_SELECT(4);
3438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3439 return DDI_BUF_TRANS_SELECT(5);
3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3441 return DDI_BUF_TRANS_SELECT(6);
3442
3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3444 return DDI_BUF_TRANS_SELECT(7);
3445 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3446 return DDI_BUF_TRANS_SELECT(8);
3447
3448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3449 return DDI_BUF_TRANS_SELECT(9);
3450 default:
3451 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3452 "0x%x\n", signal_levels);
3453 return DDI_BUF_TRANS_SELECT(0);
3454 }
3455 }
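/*
 * E.g. a train_set requesting vswing level 1 with pre-emphasis level 2
 * maps to DDI buffer translation entry 6 in the table above.
 */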
3456
3457 static void bxt_signal_levels(struct intel_dp *intel_dp)
3458 {
3459 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3460 enum port port = dport->port;
3461 struct drm_device *dev = dport->base.base.dev;
3462 struct intel_encoder *encoder = &dport->base;
3463 uint8_t train_set = intel_dp->train_set[0];
3464 uint32_t level = 0;
3465
3466 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3467 DP_TRAIN_PRE_EMPHASIS_MASK);
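/*
 * Map the (vswing, pre-emphasis) pair onto the 10-entry BXT vswing
 * table; not every combination is legal, hence the explicit switch.
 */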
3468 switch (signal_levels) {
3469 default:
3470 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
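/* fall through: use table entry 0 as the safe default */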
3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3472 level = 0;
3473 break;
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3475 level = 1;
3476 break;
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3478 level = 2;
3479 break;
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3481 level = 3;
3482 break;
3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3484 level = 4;
3485 break;
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3487 level = 5;
3488 break;
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3490 level = 6;
3491 break;
3492 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3493 level = 7;
3494 break;
3495 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3496 level = 8;
3497 break;
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3499 level = 9;
3500 break;
3501 }
3502
3503 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3504 }
3505
3506 /* Update "DP" with the signal levels appropriate for the current platform. */
3507 static void
3508 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3509 {
3510 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3511 enum port port = intel_dig_port->port;
3512 struct drm_device *dev = intel_dig_port->base.base.dev;
3513 uint32_t signal_levels, mask;
3514 uint8_t train_set = intel_dp->train_set[0];
3515
3516 if (IS_BROXTON(dev)) {
3517 signal_levels = 0;
3518 bxt_signal_levels(intel_dp);
3519 mask = 0;
3520 } else if (HAS_DDI(dev)) {
3521 signal_levels = hsw_signal_levels(train_set);
3522 mask = DDI_BUF_EMP_MASK;
3523 } else if (IS_CHERRYVIEW(dev)) {
3524 signal_levels = chv_signal_levels(intel_dp);
3525 mask = 0;
3526 } else if (IS_VALLEYVIEW(dev)) {
3527 signal_levels = vlv_signal_levels(intel_dp);
3528 mask = 0;
3529 } else if (IS_GEN7(dev) && port == PORT_A) {
3530 signal_levels = gen7_edp_signal_levels(train_set);
3531 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3532 } else if (IS_GEN6(dev) && port == PORT_A) {
3533 signal_levels = gen6_edp_signal_levels(train_set);
3534 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3535 } else {
3536 signal_levels = gen4_signal_levels(train_set);
3537 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3538 }
3539
3540 if (mask)
3541 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3542
3543 DRM_DEBUG_KMS("Using vswing level %d\n",
3544 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3545 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3546 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3547 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3548
3549 *DP = (*DP & ~mask) | signal_levels;
3550 }
3551
3552 static bool
3553 intel_dp_set_link_train(struct intel_dp *intel_dp,
3554 uint32_t *DP,
3555 uint8_t dp_train_pat)
3556 {
3557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3558 struct drm_device *dev = intel_dig_port->base.base.dev;
3559 struct drm_i915_private *dev_priv = dev->dev_private;
3560 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3561 int ret, len;
3562
3563 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3564
3565 I915_WRITE(intel_dp->output_reg, *DP);
3566 POSTING_READ(intel_dp->output_reg);
3567
3568 buf[0] = dp_train_pat;
3569 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3570 DP_TRAINING_PATTERN_DISABLE) {
3571 /* don't write DP_TRAINING_LANEx_SET on disable */
3572 len = 1;
3573 } else {
3574 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3575 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3576 len = intel_dp->lane_count + 1;
3577 }
3578
3579 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3580 buf, len);
3581
3582 return ret == len;
3583 }
3584
3585 static bool
3586 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3587 uint8_t dp_train_pat)
3588 {
3589 if (!intel_dp->train_set_valid)
3590 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3591 intel_dp_set_signal_levels(intel_dp, DP);
3592 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3593 }
3594
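/*
 * Pick up the drive-setting adjustments requested by the sink from
 * link_status, re-program the source's signal levels and push the new
 * per-lane values to DP_TRAINING_LANE0_SET and following.
 */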
3595 static bool
3596 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3597 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3598 {
3599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3600 struct drm_device *dev = intel_dig_port->base.base.dev;
3601 struct drm_i915_private *dev_priv = dev->dev_private;
3602 int ret;
3603
3604 intel_get_adjust_train(intel_dp, link_status);
3605 intel_dp_set_signal_levels(intel_dp, DP);
3606
3607 I915_WRITE(intel_dp->output_reg, *DP);
3608 POSTING_READ(intel_dp->output_reg);
3609
3610 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3611 intel_dp->train_set, intel_dp->lane_count);
3612
3613 return ret == intel_dp->lane_count;
3614 }
3615
3616 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3617 {
3618 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3619 struct drm_device *dev = intel_dig_port->base.base.dev;
3620 struct drm_i915_private *dev_priv = dev->dev_private;
3621 enum port port = intel_dig_port->port;
3622 uint32_t val;
3623
3624 if (!HAS_DDI(dev))
3625 return;
3626
3627 val = I915_READ(DP_TP_CTL(port));
3628 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3629 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3630 I915_WRITE(DP_TP_CTL(port), val);
3631
3632 /*
3633 * On PORT_A we can have only eDP in SST mode. There, the only reason
3634 * we need to set idle transmission mode is to work around a HW issue
3635 * where we enable the pipe while not in idle link-training mode.
3636 * In this case there is a requirement to wait for a minimum number of
3637 * idle patterns to be sent.
3638 */
3639 if (port == PORT_A)
3640 return;
3641
3642 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3643 1))
3644 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3645 }
3646
3647 /* Enable corresponding port and start training pattern 1 */
3648 void
3649 intel_dp_start_link_train(struct intel_dp *intel_dp)
3650 {
3651 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3652 struct drm_device *dev = encoder->dev;
3653 int i;
3654 uint8_t voltage;
3655 int voltage_tries, loop_tries;
3656 uint32_t DP = intel_dp->DP;
3657 uint8_t link_config[2];
3658
3659 if (HAS_DDI(dev))
3660 intel_ddi_prepare_link_retrain(encoder);
3661
3662 /* Write the link configuration data */
3663 link_config[0] = intel_dp->link_bw;
3664 link_config[1] = intel_dp->lane_count;
3665 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3666 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3667 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
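/*
 * eDP 1.4 sinks with a rate table are additionally told the index of
 * the selected rate via DP_LINK_RATE_SET.
 */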
3668 if (intel_dp->num_sink_rates)
3669 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3670 &intel_dp->rate_select, 1);
3671
3672 link_config[0] = 0;
3673 link_config[1] = DP_SET_ANSI_8B10B;
3674 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3675
3676 DP |= DP_PORT_EN;
3677
3678 /* clock recovery */
3679 if (!intel_dp_reset_link_train(intel_dp, &DP,
3680 DP_TRAINING_PATTERN_1 |
3681 DP_LINK_SCRAMBLING_DISABLE)) {
3682 DRM_ERROR("failed to enable link training\n");
3683 return;
3684 }
3685
3686 voltage = 0xff;
3687 voltage_tries = 0;
3688 loop_tries = 0;
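/*
 * Loop until all lanes report clock recovery; the checks below give up
 * after five full restarts at maximum vswing or five tries at one and
 * the same voltage level.
 */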
3689 for (;;) {
3690 uint8_t link_status[DP_LINK_STATUS_SIZE];
3691
3692 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3693 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3694 DRM_ERROR("failed to get link status\n");
3695 break;
3696 }
3697
3698 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3699 DRM_DEBUG_KMS("clock recovery OK\n");
3700 break;
3701 }
3702
3703 /*
3704 * if we used previously trained voltage and pre-emphasis values
3705 * and we don't get clock recovery, reset link training values
3706 */
3707 if (intel_dp->train_set_valid) {
3708 DRM_DEBUG_KMS("clock recovery not ok, reset");
3709 /* clear the flag as we are not reusing train set */
3710 intel_dp->train_set_valid = false;
3711 if (!intel_dp_reset_link_train(intel_dp, &DP,
3712 DP_TRAINING_PATTERN_1 |
3713 DP_LINK_SCRAMBLING_DISABLE)) {
3714 DRM_ERROR("failed to enable link training\n");
3715 return;
3716 }
3717 continue;
3718 }
3719
3720 /* Check to see if we've tried the max voltage */
3721 for (i = 0; i < intel_dp->lane_count; i++)
3722 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3723 break;
3724 if (i == intel_dp->lane_count) {
3725 ++loop_tries;
3726 if (loop_tries == 5) {
3727 DRM_ERROR("too many full retries, give up\n");
3728 break;
3729 }
3730 intel_dp_reset_link_train(intel_dp, &DP,
3731 DP_TRAINING_PATTERN_1 |
3732 DP_LINK_SCRAMBLING_DISABLE);
3733 voltage_tries = 0;
3734 continue;
3735 }
3736
3737 /* Check to see if we've tried the same voltage 5 times */
3738 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3739 ++voltage_tries;
3740 if (voltage_tries == 5) {
3741 DRM_ERROR("too many voltage retries, give up\n");
3742 break;
3743 }
3744 } else
3745 voltage_tries = 0;
3746 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3747
3748 /* Update training set as requested by target */
3749 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3750 DRM_ERROR("failed to update link training\n");
3751 break;
3752 }
3753 }
3754
3755 intel_dp->DP = DP;
3756 }
3757
3758 void
3759 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3760 {
3761 bool channel_eq = false;
3762 int tries, cr_tries;
3763 uint32_t DP = intel_dp->DP;
3764 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3765
3766 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3767 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3768 training_pattern = DP_TRAINING_PATTERN_3;
3769
3770 /* channel equalization */
3771 if (!intel_dp_set_link_train(intel_dp, &DP,
3772 training_pattern |
3773 DP_LINK_SCRAMBLING_DISABLE)) {
3774 DRM_ERROR("failed to start channel equalization\n");
3775 return;
3776 }
3777
3778 tries = 0;
3779 cr_tries = 0;
3780 channel_eq = false;
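/*
 * Keep polling channel EQ; if clock recovery is lost, or EQ keeps
 * failing, redo the full link training. cr_tries bounds the total
 * number of such restarts before aborting.
 */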
3781 for (;;) {
3782 uint8_t link_status[DP_LINK_STATUS_SIZE];
3783
3784 if (cr_tries > 5) {
3785 DRM_ERROR("failed to train DP, aborting\n");
3786 break;
3787 }
3788
3789 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3790 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3791 DRM_ERROR("failed to get link status\n");
3792 break;
3793 }
3794
3795 /* Make sure clock is still ok */
3796 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3797 intel_dp->train_set_valid = false;
3798 intel_dp_start_link_train(intel_dp);
3799 intel_dp_set_link_train(intel_dp, &DP,
3800 training_pattern |
3801 DP_LINK_SCRAMBLING_DISABLE);
3802 cr_tries++;
3803 continue;
3804 }
3805
3806 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3807 channel_eq = true;
3808 break;
3809 }
3810
3811 /* Try 5 times, then try clock recovery if that fails */
3812 if (tries > 5) {
3813 intel_dp->train_set_valid = false;
3814 intel_dp_start_link_train(intel_dp);
3815 intel_dp_set_link_train(intel_dp, &DP,
3816 training_pattern |
3817 DP_LINK_SCRAMBLING_DISABLE);
3818 tries = 0;
3819 cr_tries++;
3820 continue;
3821 }
3822
3823 /* Update training set as requested by target */
3824 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3825 DRM_ERROR("failed to update link training\n");
3826 break;
3827 }
3828 ++tries;
3829 }
3830
3831 intel_dp_set_idle_link_train(intel_dp);
3832
3833 intel_dp->DP = DP;
3834
3835 if (channel_eq) {
3836 intel_dp->train_set_valid = true;
3837 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3838 }
3839 }
3840
3841 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3842 {
3843 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3844 DP_TRAINING_PATTERN_DISABLE);
3845 }
3846
3847 static void
3848 intel_dp_link_down(struct intel_dp *intel_dp)
3849 {
3850 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3851 enum port port = intel_dig_port->port;
3852 struct drm_device *dev = intel_dig_port->base.base.dev;
3853 struct drm_i915_private *dev_priv = dev->dev_private;
3854 uint32_t DP = intel_dp->DP;
3855
3856 if (WARN_ON(HAS_DDI(dev)))
3857 return;
3858
3859 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3860 return;
3861
3862 DRM_DEBUG_KMS("\n");
3863
3864 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3865 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3866 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3867 } else {
3868 if (IS_CHERRYVIEW(dev))
3869 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3870 else
3871 DP &= ~DP_LINK_TRAIN_MASK;
3872 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3873 }
3874 POSTING_READ(intel_dp->output_reg);
3875
3876 if (HAS_PCH_IBX(dev) &&
3877 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3878 /* Hardware workaround: leaving our transcoder select
3879 * set to transcoder B while it's off will prevent the
3880 * corresponding HDMI output on transcoder A.
3881 *
3882 * Combine this with another hardware workaround:
3883 * transcoder select bit can only be cleared while the
3884 * port is enabled.
3885 */
3886 DP &= ~DP_PIPEB_SELECT;
3887 I915_WRITE(intel_dp->output_reg, DP);
3888 POSTING_READ(intel_dp->output_reg);
3889 }
3890
3891 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3892 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3893 POSTING_READ(intel_dp->output_reg);
3894 msleep(intel_dp->panel_power_down_delay);
3895 }
3896
3897 static bool
3898 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3899 {
3900 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3901 struct drm_device *dev = dig_port->base.base.dev;
3902 struct drm_i915_private *dev_priv = dev->dev_private;
3903 uint8_t rev;
3904
3905 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3906 sizeof(intel_dp->dpcd)) < 0)
3907 return false; /* aux transfer failed */
3908
3909 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3910
3911 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3912 return false; /* DPCD not present */
3913
3914 /* Check if the panel supports PSR */
3915 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3916 if (is_edp(intel_dp)) {
3917 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3918 intel_dp->psr_dpcd,
3919 sizeof(intel_dp->psr_dpcd));
3920 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3921 dev_priv->psr.sink_support = true;
3922 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3923 }
3924
3925 if (INTEL_INFO(dev)->gen >= 9 &&
3926 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3927 uint8_t frame_sync_cap;
3928
3929 dev_priv->psr.sink_support = true;
3930 intel_dp_dpcd_read_wake(&intel_dp->aux,
3931 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3932 &frame_sync_cap, 1);
3933 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3934 /* PSR2 needs frame sync as well */
3935 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3936 DRM_DEBUG_KMS("PSR2 %s on sink",
3937 dev_priv->psr.psr2_support ? "supported" : "not supported");
3938 }
3939 }
3940
3941 /* Training Pattern 3 support, both source and sink */
3942 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3943 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3944 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3945 intel_dp->use_tps3 = true;
3946 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3947 } else
3948 intel_dp->use_tps3 = false;
3949
3950 /* Intermediate frequency support */
3951 if (is_edp(intel_dp) &&
3952 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3953 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3954 (rev >= 0x03)) { /* eDP 1.4 or higher */
3955 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3956 int i;
3957
3958 intel_dp_dpcd_read_wake(&intel_dp->aux,
3959 DP_SUPPORTED_LINK_RATES,
3960 sink_rates,
3961 sizeof(sink_rates));
3962
3963 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3964 int val = le16_to_cpu(sink_rates[i]);
3965
3966 if (val == 0)
3967 break;
3968
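/* DPCD link rates are in 200 kHz units; store kHz */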
3969 intel_dp->sink_rates[i] = val * 200;
3970 }
3971 intel_dp->num_sink_rates = i;
3972 }
3973
3974 intel_dp_print_rates(intel_dp);
3975
3976 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3977 DP_DWN_STRM_PORT_PRESENT))
3978 return true; /* native DP sink */
3979
3980 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3981 return true; /* no per-port downstream info */
3982
3983 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3984 intel_dp->downstream_ports,
3985 DP_MAX_DOWNSTREAM_PORTS) < 0)
3986 return false; /* downstream port status fetch failed */
3987
3988 return true;
3989 }
3990
3991 static void
3992 intel_dp_probe_oui(struct intel_dp *intel_dp)
3993 {
3994 u8 buf[3];
3995
3996 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3997 return;
3998
3999 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4000 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4001 buf[0], buf[1], buf[2]);
4002
4003 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4004 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4005 buf[0], buf[1], buf[2]);
4006 }
4007
4008 static bool
4009 intel_dp_probe_mst(struct intel_dp *intel_dp)
4010 {
4011 u8 buf[1];
4012
4013 if (!intel_dp->can_mst)
4014 return false;
4015
4016 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4017 return false;
4018
4019 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
4020 if (buf[0] & DP_MST_CAP) {
4021 DRM_DEBUG_KMS("Sink is MST capable\n");
4022 intel_dp->is_mst = true;
4023 } else {
4024 DRM_DEBUG_KMS("Sink is not MST capable\n");
4025 intel_dp->is_mst = false;
4026 }
4027 }
4028
4029 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4030 return intel_dp->is_mst;
4031 }
4032
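/*
 * Fetch a 6-byte test CRC from the sink: kick the sink's CRC engine via
 * DP_TEST_SINK, wait up to six vblanks for the DP_TEST_COUNT field to
 * advance, read DP_TEST_CRC_R_CR and stop the engine again.
 */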
4033 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4034 {
4035 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4036 struct drm_device *dev = intel_dig_port->base.base.dev;
4037 struct intel_crtc *intel_crtc =
4038 to_intel_crtc(intel_dig_port->base.base.crtc);
4039 u8 buf;
4040 int test_crc_count;
4041 int attempts = 6;
4042
4043 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4044 return -EIO;
4045
4046 if (!(buf & DP_TEST_CRC_SUPPORTED))
4047 return -ENOTTY;
4048
4049 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4050 return -EIO;
4051
4052 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4053 buf | DP_TEST_SINK_START) < 0)
4054 return -EIO;
4055
4056 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4057 return -EIO;
4058 test_crc_count = buf & DP_TEST_COUNT_MASK;
4059
4060 do {
4061 if (drm_dp_dpcd_readb(&intel_dp->aux,
4062 DP_TEST_SINK_MISC, &buf) < 0)
4063 return -EIO;
4064 intel_wait_for_vblank(dev, intel_crtc->pipe);
4065 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4066
4067 if (attempts == 0) {
4068 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4069 return -ETIMEDOUT;
4070 }
4071
4072 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4073 return -EIO;
4074
4075 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4076 return -EIO;
4077 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4078 buf & ~DP_TEST_SINK_START) < 0)
4079 return -EIO;
4080
4081 return 0;
4082 }
4083
4084 static bool
4085 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4086 {
4087 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4088 DP_DEVICE_SERVICE_IRQ_VECTOR,
4089 sink_irq_vector, 1) == 1;
4090 }
4091
4092 static bool
4093 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4094 {
4095 int ret;
4096
4097 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4098 DP_SINK_COUNT_ESI,
4099 sink_irq_vector, 14);
4100 if (ret != 14)
4101 return false;
4102
4103 return true;
4104 }
4105
4106 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4107 {
4108 uint8_t test_result = DP_TEST_ACK;
4109 return test_result;
4110 }
4111
4112 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4113 {
4114 uint8_t test_result = DP_TEST_NAK;
4115 return test_result;
4116 }
4117
4118 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4119 {
4120 uint8_t test_result = DP_TEST_NAK;
4121 struct intel_connector *intel_connector = intel_dp->attached_connector;
4122 struct drm_connector *connector = &intel_connector->base;
4123
4124 if (intel_connector->detect_edid == NULL ||
4125 connector->edid_corrupt ||
4126 intel_dp->aux.i2c_defer_count > 6) {
4127 /* Check EDID read for NACKs, DEFERs and corruption
4128 * (DP CTS 1.2 Core r1.1)
4129 * 4.2.2.4 : Failed EDID read, I2C_NAK
4130 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4131 * 4.2.2.6 : EDID corruption detected
4132 * Use failsafe mode for all cases
4133 */
4134 if (intel_dp->aux.i2c_nack_count > 0 ||
4135 intel_dp->aux.i2c_defer_count > 0)
4136 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4137 intel_dp->aux.i2c_nack_count,
4138 intel_dp->aux.i2c_defer_count);
4139 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4140 } else {
4141 if (drm_dp_dpcd_write(&intel_dp->aux,
4142 DP_TEST_EDID_CHECKSUM,
4143 &intel_connector->detect_edid->checksum,
4144 1) <= 0)
4145 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4146
4147 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4148 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4149 }
4150
4151 /* Set test active flag here so userspace doesn't interrupt things */
4152 intel_dp->compliance_test_active = 1;
4153
4154 return test_result;
4155 }
4156
4157 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4158 {
4159 uint8_t test_result = DP_TEST_NAK;
4160 return test_result;
4161 }
4162
4163 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4164 {
4165 uint8_t response = DP_TEST_NAK;
4166 uint8_t rxdata = 0;
4167 int status = 0;
4168
4169 intel_dp->compliance_test_active = 0;
4170 intel_dp->compliance_test_type = 0;
4171 intel_dp->compliance_test_data = 0;
4172
4173 intel_dp->aux.i2c_nack_count = 0;
4174 intel_dp->aux.i2c_defer_count = 0;
4175
4176 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4177 if (status <= 0) {
4178 DRM_DEBUG_KMS("Could not read test request from sink\n");
4179 goto update_status;
4180 }
4181
4182 switch (rxdata) {
4183 case DP_TEST_LINK_TRAINING:
4184 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4185 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4186 response = intel_dp_autotest_link_training(intel_dp);
4187 break;
4188 case DP_TEST_LINK_VIDEO_PATTERN:
4189 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4190 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4191 response = intel_dp_autotest_video_pattern(intel_dp);
4192 break;
4193 case DP_TEST_LINK_EDID_READ:
4194 DRM_DEBUG_KMS("EDID test requested\n");
4195 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4196 response = intel_dp_autotest_edid(intel_dp);
4197 break;
4198 case DP_TEST_LINK_PHY_TEST_PATTERN:
4199 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4200 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4201 response = intel_dp_autotest_phy_pattern(intel_dp);
4202 break;
4203 default:
4204 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4205 break;
4206 }
4207
4208 update_status:
4209 status = drm_dp_dpcd_write(&intel_dp->aux,
4210 DP_TEST_RESPONSE,
4211 &response, 1);
4212 if (status <= 0)
4213 DRM_DEBUG_KMS("Could not write test response to sink\n");
4214 }
4215
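/*
 * Service an MST interrupt: read the ESI vector, retrain if channel EQ
 * was lost, hand the events to the topology manager and ack the handled
 * ones back to the sink, looping for as long as new events arrive.
 */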
4216 static int
4217 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4218 {
4219 bool bret;
4220
4221 if (intel_dp->is_mst) {
4222 u8 esi[16] = { 0 };
4223 int ret = 0;
4224 int retry;
4225 bool handled;
4226 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4227 go_again:
4228 if (bret) {
4229
4230 /* check link status - esi[10] = 0x200c */
4231 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4232 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4233 intel_dp_start_link_train(intel_dp);
4234 intel_dp_complete_link_train(intel_dp);
4235 intel_dp_stop_link_train(intel_dp);
4236 }
4237
4238 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4239 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4240
4241 if (handled) {
4242 for (retry = 0; retry < 3; retry++) {
4243 int wret;
4244 wret = drm_dp_dpcd_write(&intel_dp->aux,
4245 DP_SINK_COUNT_ESI+1,
4246 &esi[1], 3);
4247 if (wret == 3) {
4248 break;
4249 }
4250 }
4251
4252 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4253 if (bret) {
4254 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4255 goto go_again;
4256 }
4257 } else
4258 ret = 0;
4259
4260 return ret;
4261 } else {
4262 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4263 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4264 intel_dp->is_mst = false;
4265 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4266 /* send a hotplug event */
4267 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4268 }
4269 }
4270 return -EINVAL;
4271 }
4272
4273 /*
4274 * According to DP spec
4275 * 5.1.2:
4276 * 1. Read DPCD
4277 * 2. Configure link according to Receiver Capabilities
4278 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4279 * 4. Check link status on receipt of hot-plug interrupt
4280 */
4281 static void
4282 intel_dp_check_link_status(struct intel_dp *intel_dp)
4283 {
4284 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4285 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4286 u8 sink_irq_vector;
4287 u8 link_status[DP_LINK_STATUS_SIZE];
4288
4289 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4290
4291 if (!intel_encoder->connectors_active)
4292 return;
4293
4294 if (WARN_ON(!intel_encoder->base.crtc))
4295 return;
4296
4297 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4298 return;
4299
4300 /* Try to read receiver status if the link appears to be up */
4301 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4302 return;
4303 }
4304
4305 /* Now read the DPCD to see if it's actually running */
4306 if (!intel_dp_get_dpcd(intel_dp)) {
4307 return;
4308 }
4309
4310 /* Try to read the source of the interrupt */
4311 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4312 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4313 /* Clear interrupt source */
4314 drm_dp_dpcd_writeb(&intel_dp->aux,
4315 DP_DEVICE_SERVICE_IRQ_VECTOR,
4316 sink_irq_vector);
4317
4318 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4319 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4320 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4321 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4322 }
4323
4324 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4325 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4326 intel_encoder->base.name);
4327 intel_dp_start_link_train(intel_dp);
4328 intel_dp_complete_link_train(intel_dp);
4329 intel_dp_stop_link_train(intel_dp);
4330 }
4331 }
4332
4333 /* XXX this is probably wrong for multiple downstream ports */
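/*
 * The ladder: native sinks are connected once the DPCD reads back,
 * HPD-capable branch devices are judged by their SINK_COUNT, and for
 * the rest we poke DDC and fall back on the downstream port type.
 */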
4334 static enum drm_connector_status
4335 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4336 {
4337 uint8_t *dpcd = intel_dp->dpcd;
4338 uint8_t type;
4339
4340 if (!intel_dp_get_dpcd(intel_dp))
4341 return connector_status_disconnected;
4342
4343 /* if there's no downstream port, we're done */
4344 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4345 return connector_status_connected;
4346
4347 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4348 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4349 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4350 uint8_t reg;
4351
4352 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4353 &reg, 1) < 0)
4354 return connector_status_unknown;
4355
4356 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4357 : connector_status_disconnected;
4358 }
4359
4360 /* If no HPD, poke DDC gently */
4361 if (drm_probe_ddc(&intel_dp->aux.ddc))
4362 return connector_status_connected;
4363
4364 /* Well we tried, say unknown for unreliable port types */
4365 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4366 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4367 if (type == DP_DS_PORT_TYPE_VGA ||
4368 type == DP_DS_PORT_TYPE_NON_EDID)
4369 return connector_status_unknown;
4370 } else {
4371 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4372 DP_DWN_STRM_PORT_TYPE_MASK;
4373 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4374 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4375 return connector_status_unknown;
4376 }
4377
4378 /* Anything else is out of spec, warn and ignore */
4379 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4380 return connector_status_disconnected;
4381 }
4382
4383 static enum drm_connector_status
4384 edp_detect(struct intel_dp *intel_dp)
4385 {
4386 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4387 enum drm_connector_status status;
4388
4389 status = intel_panel_detect(dev);
4390 if (status == connector_status_unknown)
4391 status = connector_status_connected;
4392
4393 return status;
4394 }
4395
4396 static enum drm_connector_status
4397 ironlake_dp_detect(struct intel_dp *intel_dp)
4398 {
4399 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4400 struct drm_i915_private *dev_priv = dev->dev_private;
4401 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4402
4403 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4404 return connector_status_disconnected;
4405
4406 return intel_dp_detect_dpcd(intel_dp);
4407 }
4408
4409 static int g4x_digital_port_connected(struct drm_device *dev,
4410 struct intel_digital_port *intel_dig_port)
4411 {
4412 struct drm_i915_private *dev_priv = dev->dev_private;
4413 uint32_t bit;
4414
4415 if (IS_VALLEYVIEW(dev)) {
4416 switch (intel_dig_port->port) {
4417 case PORT_B:
4418 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4419 break;
4420 case PORT_C:
4421 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4422 break;
4423 case PORT_D:
4424 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4425 break;
4426 default:
4427 return -EINVAL;
4428 }
4429 } else {
4430 switch (intel_dig_port->port) {
4431 case PORT_B:
4432 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4433 break;
4434 case PORT_C:
4435 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4436 break;
4437 case PORT_D:
4438 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4439 break;
4440 default:
4441 return -EINVAL;
4442 }
4443 }
4444
4445 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4446 return 0;
4447 return 1;
4448 }
4449
4450 static enum drm_connector_status
4451 g4x_dp_detect(struct intel_dp *intel_dp)
4452 {
4453 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4454 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4455 int ret;
4456
4457 /* Can't disconnect eDP, but you can close the lid... */
4458 if (is_edp(intel_dp)) {
4459 enum drm_connector_status status;
4460
4461 status = intel_panel_detect(dev);
4462 if (status == connector_status_unknown)
4463 status = connector_status_connected;
4464 return status;
4465 }
4466
4467 ret = g4x_digital_port_connected(dev, intel_dig_port);
4468 if (ret == -EINVAL)
4469 return connector_status_unknown;
4470 else if (ret == 0)
4471 return connector_status_disconnected;
4472
4473 return intel_dp_detect_dpcd(intel_dp);
4474 }
4475
4476 static struct edid *
4477 intel_dp_get_edid(struct intel_dp *intel_dp)
4478 {
4479 struct intel_connector *intel_connector = intel_dp->attached_connector;
4480
4481 /* use cached edid if we have one */
4482 if (intel_connector->edid) {
4483 /* invalid edid */
4484 if (IS_ERR(intel_connector->edid))
4485 return NULL;
4486
4487 return drm_edid_duplicate(intel_connector->edid);
4488 } else
4489 return drm_get_edid(&intel_connector->base,
4490 &intel_dp->aux.ddc);
4491 }
4492
4493 static void
4494 intel_dp_set_edid(struct intel_dp *intel_dp)
4495 {
4496 struct intel_connector *intel_connector = intel_dp->attached_connector;
4497 struct edid *edid;
4498
4499 edid = intel_dp_get_edid(intel_dp);
4500 intel_connector->detect_edid = edid;
4501
4502 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4503 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4504 else
4505 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4506 }
4507
4508 static void
4509 intel_dp_unset_edid(struct intel_dp *intel_dp)
4510 {
4511 struct intel_connector *intel_connector = intel_dp->attached_connector;
4512
4513 kfree(intel_connector->detect_edid);
4514 intel_connector->detect_edid = NULL;
4515
4516 intel_dp->has_audio = false;
4517 }
4518
4519 static enum intel_display_power_domain
4520 intel_dp_power_get(struct intel_dp *dp)
4521 {
4522 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4523 enum intel_display_power_domain power_domain;
4524
4525 power_domain = intel_display_port_power_domain(encoder);
4526 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4527
4528 return power_domain;
4529 }
4530
4531 static void
4532 intel_dp_power_put(struct intel_dp *dp,
4533 enum intel_display_power_domain power_domain)
4534 {
4535 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4536 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4537 }
4538
4539 static enum drm_connector_status
4540 intel_dp_detect(struct drm_connector *connector, bool force)
4541 {
4542 struct intel_dp *intel_dp = intel_attached_dp(connector);
4543 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4544 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4545 struct drm_device *dev = connector->dev;
4546 enum drm_connector_status status;
4547 enum intel_display_power_domain power_domain;
4548 bool ret;
4549 u8 sink_irq_vector;
4550
4551 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4552 connector->base.id, connector->name);
4553 intel_dp_unset_edid(intel_dp);
4554
4555 if (intel_dp->is_mst) {
4556 /* MST devices are disconnected from a monitor POV */
4557 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4558 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4559 return connector_status_disconnected;
4560 }
4561
4562 power_domain = intel_dp_power_get(intel_dp);
4563
4564 /* Can't disconnect eDP, but you can close the lid... */
4565 if (is_edp(intel_dp))
4566 status = edp_detect(intel_dp);
4567 else if (HAS_PCH_SPLIT(dev))
4568 status = ironlake_dp_detect(intel_dp);
4569 else
4570 status = g4x_dp_detect(intel_dp);
4571 if (status != connector_status_connected)
4572 goto out;
4573
4574 intel_dp_probe_oui(intel_dp);
4575
4576 ret = intel_dp_probe_mst(intel_dp);
4577 if (ret) {
4578 /* If we are in MST mode then this connector won't appear
4579 * connected or have anything with EDID on it. */
4580 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4581 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4582 status = connector_status_disconnected;
4583 goto out;
4584 }
4585
4586 intel_dp_set_edid(intel_dp);
4587
4588 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4589 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4590 status = connector_status_connected;
4591
4592 /* Try to read the source of the interrupt */
4593 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4594 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4595 /* Clear interrupt source */
4596 drm_dp_dpcd_writeb(&intel_dp->aux,
4597 DP_DEVICE_SERVICE_IRQ_VECTOR,
4598 sink_irq_vector);
4599
4600 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4601 intel_dp_handle_test_request(intel_dp);
4602 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4603 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4604 }
4605
4606 out:
4607 intel_dp_power_put(intel_dp, power_domain);
4608 return status;
4609 }
4610
4611 static void
4612 intel_dp_force(struct drm_connector *connector)
4613 {
4614 struct intel_dp *intel_dp = intel_attached_dp(connector);
4615 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4616 enum intel_display_power_domain power_domain;
4617
4618 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4619 connector->base.id, connector->name);
4620 intel_dp_unset_edid(intel_dp);
4621
4622 if (connector->status != connector_status_connected)
4623 return;
4624
4625 power_domain = intel_dp_power_get(intel_dp);
4626
4627 intel_dp_set_edid(intel_dp);
4628
4629 intel_dp_power_put(intel_dp, power_domain);
4630
4631 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4632 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4633 }
4634
4635 static int intel_dp_get_modes(struct drm_connector *connector)
4636 {
4637 struct intel_connector *intel_connector = to_intel_connector(connector);
4638 struct edid *edid;
4639
4640 edid = intel_connector->detect_edid;
4641 if (edid) {
4642 int ret = intel_connector_update_modes(connector, edid);
4643 if (ret)
4644 return ret;
4645 }
4646
4647 /* if eDP has no EDID, fall back to fixed mode */
4648 if (is_edp(intel_attached_dp(connector)) &&
4649 intel_connector->panel.fixed_mode) {
4650 struct drm_display_mode *mode;
4651
4652 mode = drm_mode_duplicate(connector->dev,
4653 intel_connector->panel.fixed_mode);
4654 if (mode) {
4655 drm_mode_probed_add(connector, mode);
4656 return 1;
4657 }
4658 }
4659
4660 return 0;
4661 }
4662
4663 static bool
4664 intel_dp_detect_audio(struct drm_connector *connector)
4665 {
4666 bool has_audio = false;
4667 struct edid *edid;
4668
4669 edid = to_intel_connector(connector)->detect_edid;
4670 if (edid)
4671 has_audio = drm_detect_monitor_audio(edid);
4672
4673 return has_audio;
4674 }
4675
4676 static int
4677 intel_dp_set_property(struct drm_connector *connector,
4678 struct drm_property *property,
4679 uint64_t val)
4680 {
4681 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4682 struct intel_connector *intel_connector = to_intel_connector(connector);
4683 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4684 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4685 int ret;
4686
4687 ret = drm_object_property_set_value(&connector->base, property, val);
4688 if (ret)
4689 return ret;
4690
4691 if (property == dev_priv->force_audio_property) {
4692 int i = val;
4693 bool has_audio;
4694
4695 if (i == intel_dp->force_audio)
4696 return 0;
4697
4698 intel_dp->force_audio = i;
4699
4700 if (i == HDMI_AUDIO_AUTO)
4701 has_audio = intel_dp_detect_audio(connector);
4702 else
4703 has_audio = (i == HDMI_AUDIO_ON);
4704
4705 if (has_audio == intel_dp->has_audio)
4706 return 0;
4707
4708 intel_dp->has_audio = has_audio;
4709 goto done;
4710 }
4711
4712 if (property == dev_priv->broadcast_rgb_property) {
4713 bool old_auto = intel_dp->color_range_auto;
4714 uint32_t old_range = intel_dp->color_range;
4715
4716 switch (val) {
4717 case INTEL_BROADCAST_RGB_AUTO:
4718 intel_dp->color_range_auto = true;
4719 break;
4720 case INTEL_BROADCAST_RGB_FULL:
4721 intel_dp->color_range_auto = false;
4722 intel_dp->color_range = 0;
4723 break;
4724 case INTEL_BROADCAST_RGB_LIMITED:
4725 intel_dp->color_range_auto = false;
4726 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4727 break;
4728 default:
4729 return -EINVAL;
4730 }
4731
4732 if (old_auto == intel_dp->color_range_auto &&
4733 old_range == intel_dp->color_range)
4734 return 0;
4735
4736 goto done;
4737 }
4738
4739 if (is_edp(intel_dp) &&
4740 property == connector->dev->mode_config.scaling_mode_property) {
4741 if (val == DRM_MODE_SCALE_NONE) {
4742 DRM_DEBUG_KMS("no scaling not supported\n");
4743 return -EINVAL;
4744 }
4745
4746 if (intel_connector->panel.fitting_mode == val) {
4747 /* the eDP scaling property is not changed */
4748 return 0;
4749 }
4750 intel_connector->panel.fitting_mode = val;
4751
4752 goto done;
4753 }
4754
4755 return -EINVAL;
4756
4757 done:
4758 if (intel_encoder->base.crtc)
4759 intel_crtc_restore_mode(intel_encoder->base.crtc);
4760
4761 return 0;
4762 }
4763
4764 static void
4765 intel_dp_connector_destroy(struct drm_connector *connector)
4766 {
4767 struct intel_connector *intel_connector = to_intel_connector(connector);
4768
4769 kfree(intel_connector->detect_edid);
4770
4771 if (!IS_ERR_OR_NULL(intel_connector->edid))
4772 kfree(intel_connector->edid);
4773
4774 /* Can't call is_edp() since the encoder may have been destroyed
4775 * already. */
4776 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4777 intel_panel_fini(&intel_connector->panel);
4778
4779 drm_connector_cleanup(connector);
4780 kfree(connector);
4781 }
4782
4783 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4784 {
4785 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4786 struct intel_dp *intel_dp = &intel_dig_port->dp;
4787
4788 drm_dp_aux_unregister(&intel_dp->aux);
4789 intel_dp_mst_encoder_cleanup(intel_dig_port);
4790 if (is_edp(intel_dp)) {
4791 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4792 /*
4793 * vdd might still be enabled due to the delayed vdd off.
4794 * Make sure vdd is actually turned off here.
4795 */
4796 pps_lock(intel_dp);
4797 edp_panel_vdd_off_sync(intel_dp);
4798 pps_unlock(intel_dp);
4799
4800 if (intel_dp->edp_notifier.notifier_call) {
4801 unregister_reboot_notifier(&intel_dp->edp_notifier);
4802 intel_dp->edp_notifier.notifier_call = NULL;
4803 }
4804 }
4805 drm_encoder_cleanup(encoder);
4806 kfree(intel_dig_port);
4807 }
4808
4809 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4810 {
4811 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4812
4813 if (!is_edp(intel_dp))
4814 return;
4815
4816 /*
4817 * vdd might still be enabled due to the delayed vdd off.
4818 * Make sure vdd is actually turned off here.
4819 */
4820 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4821 pps_lock(intel_dp);
4822 edp_panel_vdd_off_sync(intel_dp);
4823 pps_unlock(intel_dp);
4824 }
4825
4826 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4827 {
4828 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4829 struct drm_device *dev = intel_dig_port->base.base.dev;
4830 struct drm_i915_private *dev_priv = dev->dev_private;
4831 enum intel_display_power_domain power_domain;
4832
4833 lockdep_assert_held(&dev_priv->pps_mutex);
4834
4835 if (!edp_have_panel_vdd(intel_dp))
4836 return;
4837
4838 /*
4839 * The VDD bit needs a power domain reference, so if the bit is
4840 * already enabled when we boot or resume, grab this reference and
4841 * schedule a vdd off, so we don't hold on to the reference
4842 * indefinitely.
4843 */
4844 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4845 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4846 intel_display_power_get(dev_priv, power_domain);
4847
4848 edp_panel_vdd_schedule_off(intel_dp);
4849 }
4850
4851 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4852 {
4853 struct intel_dp *intel_dp;
4854
4855 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4856 return;
4857
4858 intel_dp = enc_to_intel_dp(encoder);
4859
4860 pps_lock(intel_dp);
4861
4862 /*
4863 * Read out the current power sequencer assignment,
4864 * in case the BIOS did something with it.
4865 */
4866 if (IS_VALLEYVIEW(encoder->dev))
4867 vlv_initial_power_sequencer_setup(intel_dp);
4868
4869 intel_edp_panel_vdd_sanitize(intel_dp);
4870
4871 pps_unlock(intel_dp);
4872 }
4873
4874 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4875 .dpms = intel_connector_dpms,
4876 .detect = intel_dp_detect,
4877 .force = intel_dp_force,
4878 .fill_modes = drm_helper_probe_single_connector_modes,
4879 .set_property = intel_dp_set_property,
4880 .atomic_get_property = intel_connector_atomic_get_property,
4881 .destroy = intel_dp_connector_destroy,
4882 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4883 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4884 };
4885
4886 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4887 .get_modes = intel_dp_get_modes,
4888 .mode_valid = intel_dp_mode_valid,
4889 .best_encoder = intel_best_encoder,
4890 };
4891
4892 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4893 .reset = intel_dp_encoder_reset,
4894 .destroy = intel_dp_encoder_destroy,
4895 };
4896
4897 void
4898 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4899 {
4900 return;
4901 }
4902
4903 enum irqreturn
4904 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4905 {
4906 struct intel_dp *intel_dp = &intel_dig_port->dp;
4907 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4908 struct drm_device *dev = intel_dig_port->base.base.dev;
4909 struct drm_i915_private *dev_priv = dev->dev_private;
4910 enum intel_display_power_domain power_domain;
4911 enum irqreturn ret = IRQ_NONE;
4912
4913 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4914 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4915
4916 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4917 /*
4918 * vdd off can generate a long pulse on eDP which
4919 * would require vdd on to handle it, and thus we
4920 * would end up in an endless cycle of
4921 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4922 */
4923 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4924 port_name(intel_dig_port->port));
4925 return IRQ_HANDLED;
4926 }
4927
4928 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4929 port_name(intel_dig_port->port),
4930 long_hpd ? "long" : "short");
4931
4932 power_domain = intel_display_port_power_domain(intel_encoder);
4933 intel_display_power_get(dev_priv, power_domain);
4934
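/*
 * Long pulses mean the sink may have come or gone: re-check the live
 * state, re-read the DPCD and re-probe MST. Short pulses are serviced
 * in place: MST IRQs go to the topology manager, SST links get a link
 * status check.
 */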
4935 if (long_hpd) {
4936 /* indicate that we need to restart link training */
4937 intel_dp->train_set_valid = false;
4938
4939 if (HAS_PCH_SPLIT(dev)) {
4940 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4941 goto mst_fail;
4942 } else {
4943 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4944 goto mst_fail;
4945 }
4946
4947 if (!intel_dp_get_dpcd(intel_dp)) {
4948 goto mst_fail;
4949 }
4950
4951 intel_dp_probe_oui(intel_dp);
4952
4953 if (!intel_dp_probe_mst(intel_dp))
4954 goto mst_fail;
4955
4956 } else {
4957 if (intel_dp->is_mst) {
4958 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4959 goto mst_fail;
4960 }
4961
4962 if (!intel_dp->is_mst) {
4963 /*
4964 * we'll check the link status via the normal hot plug path later -
4965 * but for short hpds we should check it now
4966 */
4967 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4968 intel_dp_check_link_status(intel_dp);
4969 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4970 }
4971 }
4972
4973 ret = IRQ_HANDLED;
4974
4975 goto put_power;
4976 mst_fail:
4977 /* if we were in MST mode and the device is not there, get out of MST mode */
4978 if (intel_dp->is_mst) {
4979 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4980 intel_dp->is_mst = false;
4981 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4982 }
4983 put_power:
4984 intel_display_power_put(dev_priv, power_domain);
4985
4986 return ret;
4987 }
4988
4989 /* Return which DP Port should be selected for Transcoder DP control */
4990 int
4991 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4992 {
4993 struct drm_device *dev = crtc->dev;
4994 struct intel_encoder *intel_encoder;
4995 struct intel_dp *intel_dp;
4996
4997 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4998 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4999
5000 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5001 intel_encoder->type == INTEL_OUTPUT_EDP)
5002 return intel_dp->output_reg;
5003 }
5004
5005 return -1;
5006 }
5007
5008 /* check the VBT to see whether the eDP is on DP-D port */
5009 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5010 {
5011 struct drm_i915_private *dev_priv = dev->dev_private;
5012 union child_device_config *p_child;
5013 int i;
5014 static const short port_mapping[] = {
5015 [PORT_B] = PORT_IDPB,
5016 [PORT_C] = PORT_IDPC,
5017 [PORT_D] = PORT_IDPD,
5018 };
5019
5020 if (port == PORT_A)
5021 return true;
5022
5023 if (!dev_priv->vbt.child_dev_num)
5024 return false;
5025
5026 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5027 p_child = dev_priv->vbt.child_dev + i;
5028
5029 if (p_child->common.dvo_port == port_mapping[port] &&
5030 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5031 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5032 return true;
5033 }
5034 return false;
5035 }
5036
5037 void
5038 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5039 {
5040 struct intel_connector *intel_connector = to_intel_connector(connector);
5041
5042 intel_attach_force_audio_property(connector);
5043 intel_attach_broadcast_rgb_property(connector);
5044 intel_dp->color_range_auto = true;
5045
5046 if (is_edp(intel_dp)) {
5047 drm_mode_create_scaling_mode_property(connector->dev);
5048 drm_object_attach_property(
5049 &connector->base,
5050 connector->dev->mode_config.scaling_mode_property,
5051 DRM_MODE_SCALE_ASPECT);
5052 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5053 }
5054 }
5055
5056 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5057 {
5058 intel_dp->last_power_cycle = jiffies;
5059 intel_dp->last_power_on = jiffies;
5060 intel_dp->last_backlight_off = jiffies;
5061 }
5062
5063 static void
5064 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5065 struct intel_dp *intel_dp)
5066 {
5067 struct drm_i915_private *dev_priv = dev->dev_private;
5068 struct edp_power_seq cur, vbt, spec,
5069 *final = &intel_dp->pps_delays;
5070 u32 pp_on, pp_off, pp_div, pp;
5071 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5072
5073 lockdep_assert_held(&dev_priv->pps_mutex);
5074
5075 /* already initialized? */
5076 if (final->t11_t12 != 0)
5077 return;
5078
5079 if (HAS_PCH_SPLIT(dev)) {
5080 pp_ctrl_reg = PCH_PP_CONTROL;
5081 pp_on_reg = PCH_PP_ON_DELAYS;
5082 pp_off_reg = PCH_PP_OFF_DELAYS;
5083 pp_div_reg = PCH_PP_DIVISOR;
5084 } else {
5085 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5086
5087 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5088 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5089 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5090 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5091 }
5092
5093 /* Workaround: Need to write PP_CONTROL with the unlock key as
5094 * the very first thing. */
5095 pp = ironlake_get_pp_control(intel_dp);
5096 I915_WRITE(pp_ctrl_reg, pp);
5097
5098 pp_on = I915_READ(pp_on_reg);
5099 pp_off = I915_READ(pp_off_reg);
5100 pp_div = I915_READ(pp_div_reg);
5101
5102 /* Pull timing values out of registers */
5103 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5104 PANEL_POWER_UP_DELAY_SHIFT;
5105
5106 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5107 PANEL_LIGHT_ON_DELAY_SHIFT;
5108
5109 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5110 PANEL_LIGHT_OFF_DELAY_SHIFT;
5111
5112 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5113 PANEL_POWER_DOWN_DELAY_SHIFT;
5114
5115 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5116 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5117
5118 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5119 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5120
5121 vbt = dev_priv->vbt.edp_pps;
5122
5123 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5124 * our hw here, which are all in 100usec. */
5125 spec.t1_t3 = 210 * 10;
5126 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5127 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5128 spec.t10 = 500 * 10;
5129 /* This one is special and actually in units of 100ms, but zero
5130 * based in the hw (so we need to add 100 ms). But the sw vbt
5131 * table multiplies it by 1000 to make it in units of 100usec,
5132 * too. */
5133 spec.t11_t12 = (510 + 100) * 10;
5134
5135 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5136 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5137
5138 /* Use the max of the register settings and vbt. If both are
5139 * unset, fall back to the spec limits. */
5140 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5141 spec.field : \
5142 max(cur.field, vbt.field))
5143 assign_final(t1_t3);
5144 assign_final(t8);
5145 assign_final(t9);
5146 assign_final(t10);
5147 assign_final(t11_t12);
5148 #undef assign_final
5149
5150 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5151 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5152 intel_dp->backlight_on_delay = get_delay(t8);
5153 intel_dp->backlight_off_delay = get_delay(t9);
5154 intel_dp->panel_power_down_delay = get_delay(t10);
5155 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5156 #undef get_delay
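/*
 * Worked example with hypothetical values: a VBT t1_t3 of 2100 (210 ms
 * in the 100 us hw units) against a zeroed register makes assign_final()
 * pick 2100, which get_delay() then turns into a 210 ms
 * panel_power_up_delay.
 */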
5157
5158 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5159 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5160 intel_dp->panel_power_cycle_delay);
5161
5162 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5163 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5164 }
5165
5166 static void
5167 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5168 struct intel_dp *intel_dp)
5169 {
5170 struct drm_i915_private *dev_priv = dev->dev_private;
5171 u32 pp_on, pp_off, pp_div, port_sel = 0;
5172 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5173 int pp_on_reg, pp_off_reg, pp_div_reg;
5174 enum port port = dp_to_dig_port(intel_dp)->port;
5175 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5176
5177 lockdep_assert_held(&dev_priv->pps_mutex);
5178
5179 if (HAS_PCH_SPLIT(dev)) {
5180 pp_on_reg = PCH_PP_ON_DELAYS;
5181 pp_off_reg = PCH_PP_OFF_DELAYS;
5182 pp_div_reg = PCH_PP_DIVISOR;
5183 } else {
5184 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5185
5186 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5187 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5188 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5189 }
5190
5191 /*
5192 * And finally store the new values in the power sequencer. The
5193 * backlight delays are set to 1 because we do manual waits on them. For
5194 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5195 * we'll end up waiting for the backlight off delay twice: once when we
5196 * do the manual sleep, and once when we disable the panel and wait for
5197 * the PP_STATUS bit to become zero.
5198 */
5199 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5200 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5201 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5202 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5203 /* Compute the divisor for the pp clock, simply match the Bspec
5204 * formula. */
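/*
 * DIV_ROUND_UP(t11_t12, 1000) converts the 100 us based delay into the
 * 100 ms units of the register field, e.g. 6100 (610 ms) becomes 7.
 */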
5205 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5206 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5207 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5208
5209 /* Haswell doesn't have any port selection bits for the panel
5210 * power sequencer any more. */
5211 if (IS_VALLEYVIEW(dev)) {
5212 port_sel = PANEL_PORT_SELECT_VLV(port);
5213 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5214 if (port == PORT_A)
5215 port_sel = PANEL_PORT_SELECT_DPA;
5216 else
5217 port_sel = PANEL_PORT_SELECT_DPD;
5218 }
5219
5220 pp_on |= port_sel;
5221
5222 I915_WRITE(pp_on_reg, pp_on);
5223 I915_WRITE(pp_off_reg, pp_off);
5224 I915_WRITE(pp_div_reg, pp_div);
5225
5226 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5227 I915_READ(pp_on_reg),
5228 I915_READ(pp_off_reg),
5229 I915_READ(pp_div_reg));
5230 }
5231
5232 /**
5233 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5234 * @dev: DRM device
5235 * @refresh_rate: RR to be programmed
5236 *
5237 * This function gets called when refresh rate (RR) has to be changed from
5238 * one frequency to another. Switches can be between high and low RR
5239 * supported by the panel or to any other RR based on media playback (in
5240 * this case, RR value needs to be passed from user space).
5241 *
5242 * The caller of this function needs to take a lock on dev_priv->drrs.
5243 */
5244 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5245 {
5246 struct drm_i915_private *dev_priv = dev->dev_private;
5247 struct intel_encoder *encoder;
5248 struct intel_digital_port *dig_port = NULL;
5249 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5250 struct intel_crtc_state *config = NULL;
5251 struct intel_crtc *intel_crtc = NULL;
5252 u32 reg, val;
5253 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5254
5255 if (refresh_rate <= 0) {
5256 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5257 return;
5258 }
5259
5260 if (intel_dp == NULL) {
5261 DRM_DEBUG_KMS("DRRS not supported.\n");
5262 return;
5263 }
5264
5265 /*
5266 * FIXME: This needs proper synchronization with psr state for some
5267 * platforms that cannot have PSR and DRRS enabled at the same time.
5268 */
5269
5270 dig_port = dp_to_dig_port(intel_dp);
5271 encoder = &dig_port->base;
5272 intel_crtc = to_intel_crtc(encoder->base.crtc);
5273
5274 if (!intel_crtc) {
5275 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5276 return;
5277 }
5278
5279 config = intel_crtc->config;
5280
5281 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5282 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5283 return;
5284 }
5285
5286 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5287 refresh_rate)
5288 index = DRRS_LOW_RR;
5289
5290 if (index == dev_priv->drrs.refresh_rate_type) {
5291 DRM_DEBUG_KMS(
5292 "DRRS requested for previously set RR...ignoring\n");
5293 return;
5294 }
5295
5296 if (!intel_crtc->active) {
5297 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5298 return;
5299 }
5300
5301 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5302 switch (index) {
5303 case DRRS_HIGH_RR:
5304 intel_dp_set_m_n(intel_crtc, M1_N1);
5305 break;
5306 case DRRS_LOW_RR:
5307 intel_dp_set_m_n(intel_crtc, M2_N2);
5308 break;
5309 case DRRS_MAX_RR:
5310 default:
5311 DRM_ERROR("Unsupported refreshrate type\n");
5312 }
5313 } else if (INTEL_INFO(dev)->gen > 6) {
5314 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5315 val = I915_READ(reg);
5316
5317 if (index > DRRS_HIGH_RR) {
5318 if (IS_VALLEYVIEW(dev))
5319 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5320 else
5321 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5322 } else {
5323 if (IS_VALLEYVIEW(dev))
5324 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5325 else
5326 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5327 }
5328 I915_WRITE(reg, val);
5329 }
5330
5331 dev_priv->drrs.refresh_rate_type = index;
5332
5333 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5334 }
5335
5336 /**
5337 * intel_edp_drrs_enable - init drrs struct if supported
5338 * @intel_dp: DP struct
5339 *
5340 * Initializes frontbuffer_bits and drrs.dp
5341 */
5342 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5343 {
5344 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5345 struct drm_i915_private *dev_priv = dev->dev_private;
5346 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5347 struct drm_crtc *crtc = dig_port->base.base.crtc;
5348 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5349
5350 if (!intel_crtc->config->has_drrs) {
5351 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5352 return;
5353 }
5354
5355 mutex_lock(&dev_priv->drrs.mutex);
5356 if (WARN_ON(dev_priv->drrs.dp)) {
5357 DRM_ERROR("DRRS already enabled\n");
5358 goto unlock;
5359 }
5360
5361 dev_priv->drrs.busy_frontbuffer_bits = 0;
5362
5363 dev_priv->drrs.dp = intel_dp;
5364
5365 unlock:
5366 mutex_unlock(&dev_priv->drrs.mutex);
5367 }

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

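	/*
	 * Cancel any pending downclock work outside the mutex: the work
	 * item itself takes drrs.mutex, so cancelling it synchronously
	 * while still holding the lock would deadlock.
	 */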
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement, time
 * updates etc.), DRRS needs to be invalidated, i.e. we need to switch to
 * the high refresh rate.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
			       unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
			dev_priv->drrs.dp->attached_connector->panel.
			fixed_mode->vrefresh);
	}

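	/*
	 * Only frontbuffer bits belonging to the eDP pipe matter here;
	 * mask the rest off before recording them as busy, so that
	 * activity on unrelated pipes cannot keep the panel at the high
	 * refresh rate.
	 */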
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
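	/*
	 * Clear the flushed bits; once no busy bits remain and we are
	 * still at the high refresh rate, arm the one second timer that
	 * performs the actual downclock.
	 */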
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
	    !dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing the refresh rate (RR) by doing a full
 * modeset (may appear as a blink on screen) and is used in dock-undock
 * scenarios. Seamless DRRS involves changing the RR without any visual
 * effect to the user and can be used during normal system usage. This is
 * done by programming certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to the low RR based on usage scenarios.
 *
 * eDP DRRS:
 * The implementation is based on the frontbuffer tracking implementation.
 * When there is a disturbance on the screen triggered by user activity or a
 * periodic system activity, DRRS is disabled (RR is changed to the high RR).
 * When there is no movement on screen, after a timeout of 1 second, a switch
 * to the low RR is made.
 * For integration with the frontbuffer tracking code,
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
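
/*
 * A minimal usage sketch (illustrative only, not actual driver code): a
 * frontbuffer tracking caller that dirties the primary plane of a pipe
 * would bracket its CPU writes roughly like this, assuming
 * INTEL_FRONTBUFFER_PRIMARY() is the bit used by the tracking code:
 *
 *	unsigned bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
 *
 *	intel_edp_drrs_invalidate(dev, bits);	<- forces the high RR
 *	... CPU rendering into the frontbuffer ...
 *	intel_edp_drrs_flush(dev, bits);	<- arms the 1s downclock timer
 */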

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize the
 * basic DRRS state.
 *
 * Returns:
 * Downclock mode if the panel supports it, else NULL.
 * DRRS support is determined by the presence of a downclock mode (apart
 * from the VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

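	/*
	 * Look through the EDID-probed modes for a lower-vrefresh variant
	 * of the fixed mode; its presence is what ultimately qualifies the
	 * panel for seamless DRRS.
	 */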
	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
						    connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for eDP. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight
		 * setup. If the current pipe isn't valid, try the PPS pipe,
		 * and if that fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: pick the per-platform AUX helpers */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd
			 * off. Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}

void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
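
	/*
	 * The enable/disable sequence differs per platform: CHV and VLV
	 * need their own pre-PLL and pre-enable steps, while the remaining
	 * platforms use the simpler g4x-style sequence.
	 */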
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
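	/*
	 * On CHV, port D can only drive pipe C; ports B and C are limited
	 * to pipes A and B. Everywhere else any of the three pipes will do.
	 */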
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hpd_irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}

void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

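	/*
	 * Walk every digital port and try to resume its MST topology; if
	 * the resume fails (e.g. the branch device went away while we were
	 * suspended), recheck the MST status so the stale topology state
	 * gets torn down.
	 */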
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0)
				intel_dp_check_mst_status(&intel_dig_port->dp);
		}
	}
}