drm/i915: Clarify the DP code platform checks
drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional divider for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
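
/*
 * Worked example of the fixed-point encoding above (annotation for
 * illustration only, not used by the code): for DP_LINK_BW_1_62,
 * m2 = (32 << 22) | 1677722 = 0x8000000 | 0x19999a = 0x819999a,
 * i.e. an effective divider of 32 + 1677722/2^22 ~= 32.4.
 */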

static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
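
/*
 * Sanity-check sketch of the two helpers above (annotation, not part of
 * the original code): for 1680x1050R, ->clock == 119000, so at 18bpp
 * intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200,
 * while one 2.7GHz lane gives
 * intel_dp_max_data_rate(270000, 1) == 270000 * 1 * 8 / 10 == 216000,
 * so that mode just fits on a single lane (214200 <= 216000).
 */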

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
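
/*
 * Packing example (annotation, not part of the original code): bytes are
 * placed big-endian within the 32-bit AUX data word, so
 * intel_dp_pack_aux((const uint8_t []){ 0x12, 0x34, 0x56 }, 3) == 0x12345600.
 */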

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't currently have a power sequencer.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock onto the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so
	 * always use them.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
   This is only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and we'd like it to run
	 * at 2MHz. So take the hrawclk value and divide by 2 and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to the upper
	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
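
/*
 * Illustration of the (bw_code >> 3) + 1 mapping above (annotation, not
 * part of the original code): DP_LINK_BW_1_62 == 0x06 -> 1 entry of
 * default_rates, DP_LINK_BW_2_7 == 0x0a -> 2 entries, and
 * DP_LINK_BW_5_4 == 0x14 -> all 3 entries.
 */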

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;
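
	/*
	 * Annotation (not in the original code): the IS_SKYLAKE() check
	 * below can never be true here, since Skylake already returned
	 * skl_rates above, so this WaDisableHBR2 clamp is dead code.
	 */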
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
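
/*
 * Example of the two-pointer merge above (annotation, illustrative only):
 * both arrays are sorted ascending, so for source {162000, 270000, 540000}
 * and sink {162000, 216000, 270000} the common rates written out are
 * {162000, 270000} and the function returns 2.
 */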

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
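
/*
 * Annotation (not in the original code): rates[] above is zero-initialized
 * and filled in ascending order, so rate_to_index(0, rates) locates the
 * first unused (zero) slot and rates[index - 1] is the highest common rate.
 */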

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift... */
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
			if (ret)
				return false;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1866
1867 static void edp_panel_on(struct intel_dp *intel_dp)
1868 {
1869 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1870 struct drm_i915_private *dev_priv = dev->dev_private;
1871 u32 pp;
1872 u32 pp_ctrl_reg;
1873
1874 lockdep_assert_held(&dev_priv->pps_mutex);
1875
1876 if (!is_edp(intel_dp))
1877 return;
1878
1879 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1880 port_name(dp_to_dig_port(intel_dp)->port));
1881
1882 if (WARN(edp_have_panel_power(intel_dp),
1883 "eDP port %c panel power already on\n",
1884 port_name(dp_to_dig_port(intel_dp)->port)))
1885 return;
1886
1887 wait_panel_power_cycle(intel_dp);
1888
1889 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1890 pp = ironlake_get_pp_control(intel_dp);
1891 if (IS_GEN5(dev)) {
1892 /* ILK workaround: disable reset around power sequence */
1893 pp &= ~PANEL_POWER_RESET;
1894 I915_WRITE(pp_ctrl_reg, pp);
1895 POSTING_READ(pp_ctrl_reg);
1896 }
1897
1898 pp |= POWER_TARGET_ON;
1899 if (!IS_GEN5(dev))
1900 pp |= PANEL_POWER_RESET;
1901
1902 I915_WRITE(pp_ctrl_reg, pp);
1903 POSTING_READ(pp_ctrl_reg);
1904
1905 wait_panel_on(intel_dp);
1906 intel_dp->last_power_on = jiffies;
1907
1908 if (IS_GEN5(dev)) {
1909 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1910 I915_WRITE(pp_ctrl_reg, pp);
1911 POSTING_READ(pp_ctrl_reg);
1912 }
1913 }
1914
1915 void intel_edp_panel_on(struct intel_dp *intel_dp)
1916 {
1917 if (!is_edp(intel_dp))
1918 return;
1919
1920 pps_lock(intel_dp);
1921 edp_panel_on(intel_dp);
1922 pps_unlock(intel_dp);
1923 }
1924
1925
1926 static void edp_panel_off(struct intel_dp *intel_dp)
1927 {
1928 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1929 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1930 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1931 struct drm_i915_private *dev_priv = dev->dev_private;
1932 enum intel_display_power_domain power_domain;
1933 u32 pp;
1934 u32 pp_ctrl_reg;
1935
1936 lockdep_assert_held(&dev_priv->pps_mutex);
1937
1938 if (!is_edp(intel_dp))
1939 return;
1940
1941 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1942 port_name(dp_to_dig_port(intel_dp)->port));
1943
1944 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1945 port_name(dp_to_dig_port(intel_dp)->port));
1946
1947 pp = ironlake_get_pp_control(intel_dp);
1948 /* We need to switch off panel power _and_ force vdd, for otherwise some
1949 * panels get very unhappy and cease to work. */
1950 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1951 EDP_BLC_ENABLE);
1952
1953 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1954
1955 intel_dp->want_panel_vdd = false;
1956
1957 I915_WRITE(pp_ctrl_reg, pp);
1958 POSTING_READ(pp_ctrl_reg);
1959
1960 intel_dp->last_power_cycle = jiffies;
1961 wait_panel_off(intel_dp);
1962
1963 /* We got a reference when we enabled the VDD. */
1964 power_domain = intel_display_port_power_domain(intel_encoder);
1965 intel_display_power_put(dev_priv, power_domain);
1966 }
1967
1968 void intel_edp_panel_off(struct intel_dp *intel_dp)
1969 {
1970 if (!is_edp(intel_dp))
1971 return;
1972
1973 pps_lock(intel_dp);
1974 edp_panel_off(intel_dp);
1975 pps_unlock(intel_dp);
1976 }
1977
1978 /* Enable backlight in the panel power control. */
1979 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1980 {
1981 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1982 struct drm_device *dev = intel_dig_port->base.base.dev;
1983 struct drm_i915_private *dev_priv = dev->dev_private;
1984 u32 pp;
1985 u32 pp_ctrl_reg;
1986
1987 /*
1988 * If we enable the backlight right away following a panel power
1989 * on, we may see slight flicker as the panel syncs with the eDP
1990 * link. So delay a bit to make sure the image is solid before
1991 * allowing it to appear.
1992 */
1993 wait_backlight_on(intel_dp);
1994
1995 pps_lock(intel_dp);
1996
1997 pp = ironlake_get_pp_control(intel_dp);
1998 pp |= EDP_BLC_ENABLE;
1999
2000 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2001
2002 I915_WRITE(pp_ctrl_reg, pp);
2003 POSTING_READ(pp_ctrl_reg);
2004
2005 pps_unlock(intel_dp);
2006 }
2007
2008 /* Enable backlight PWM and backlight PP control. */
2009 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2010 {
2011 if (!is_edp(intel_dp))
2012 return;
2013
2014 DRM_DEBUG_KMS("\n");
2015
2016 intel_panel_enable_backlight(intel_dp->attached_connector);
2017 _intel_edp_backlight_on(intel_dp);
2018 }
2019
2020 /* Disable backlight in the panel power control. */
2021 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2022 {
2023 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2024 struct drm_i915_private *dev_priv = dev->dev_private;
2025 u32 pp;
2026 u32 pp_ctrl_reg;
2027
2028 if (!is_edp(intel_dp))
2029 return;
2030
2031 pps_lock(intel_dp);
2032
2033 pp = ironlake_get_pp_control(intel_dp);
2034 pp &= ~EDP_BLC_ENABLE;
2035
2036 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2037
2038 I915_WRITE(pp_ctrl_reg, pp);
2039 POSTING_READ(pp_ctrl_reg);
2040
2041 pps_unlock(intel_dp);
2042
2043 intel_dp->last_backlight_off = jiffies;
2044 edp_wait_backlight_off(intel_dp);
2045 }
2046
2047 /* Disable backlight PP control and backlight PWM. */
2048 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2049 {
2050 if (!is_edp(intel_dp))
2051 return;
2052
2053 DRM_DEBUG_KMS("\n");
2054
2055 _intel_edp_backlight_off(intel_dp);
2056 intel_panel_disable_backlight(intel_dp->attached_connector);
2057 }
2058
2059 /*
2060 * Hook for controlling the panel power control backlight through the bl_power
2061 * sysfs attribute. Take care to handle multiple calls.
2062 */
2063 static void intel_edp_backlight_power(struct intel_connector *connector,
2064 bool enable)
2065 {
2066 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2067 bool is_enabled;
2068
2069 pps_lock(intel_dp);
2070 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2071 pps_unlock(intel_dp);
2072
2073 if (is_enabled == enable)
2074 return;
2075
2076 DRM_DEBUG_KMS("panel power control backlight %s\n",
2077 enable ? "enable" : "disable");
2078
2079 if (enable)
2080 _intel_edp_backlight_on(intel_dp);
2081 else
2082 _intel_edp_backlight_off(intel_dp);
2083 }
2084
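/*
 * Turn on the CPU eDP PLL via DP_A. The pipe must still be disabled
 * here, and the port enable/audio bits are cleared first so that only
 * the PLL is enabled by this write. The udelay presumably covers the
 * PLL lock time.
 */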
2085 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2086 {
2087 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2088 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2089 struct drm_device *dev = crtc->dev;
2090 struct drm_i915_private *dev_priv = dev->dev_private;
2091 u32 dpa_ctl;
2092
2093 assert_pipe_disabled(dev_priv,
2094 to_intel_crtc(crtc)->pipe);
2095
2096 DRM_DEBUG_KMS("\n");
2097 dpa_ctl = I915_READ(DP_A);
2098 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2099 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2100
2101 /* We don't adjust intel_dp->DP while tearing down the link, to
2102 * facilitate link retraining (e.g. after hotplug). Hence clear all
2103 * enable bits here to ensure that we don't enable too much. */
2104 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2105 intel_dp->DP |= DP_PLL_ENABLE;
2106 I915_WRITE(DP_A, intel_dp->DP);
2107 POSTING_READ(DP_A);
2108 udelay(200);
2109 }
2110
2111 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2112 {
2113 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2114 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2115 struct drm_device *dev = crtc->dev;
2116 struct drm_i915_private *dev_priv = dev->dev_private;
2117 u32 dpa_ctl;
2118
2119 assert_pipe_disabled(dev_priv,
2120 to_intel_crtc(crtc)->pipe);
2121
2122 dpa_ctl = I915_READ(DP_A);
2123 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2124 "dp pll off, should be on\n");
2125 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2126
2127 /* We can't rely on the value tracked for the DP register in
2128 * intel_dp->DP because link_down must not change that (otherwise link
2129 * re-training will fail). */
2130 dpa_ctl &= ~DP_PLL_ENABLE;
2131 I915_WRITE(DP_A, dpa_ctl);
2132 POSTING_READ(DP_A);
2133 udelay(200);
2134 }
2135
2136 /* If the sink supports it, try to set the power state appropriately */
2137 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2138 {
2139 int ret, i;
2140
2141 /* Should have a valid DPCD by this point */
2142 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2143 return;
2144
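/*
 * DPCD DP_SET_POWER: DP_SET_POWER_D0 wakes the sink up, while
 * DP_SET_POWER_D3 puts it into a low power state. The register was
 * only added in DPCD 1.1, hence the revision check above.
 */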
2145 if (mode != DRM_MODE_DPMS_ON) {
2146 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2147 DP_SET_POWER_D3);
2148 } else {
2149 /*
2150 * When turning on, retry a few times (sleeping 1 ms between
2151 * attempts) to give the sink time to wake up.
2152 */
2153 for (i = 0; i < 3; i++) {
2154 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2155 DP_SET_POWER_D0);
2156 if (ret == 1)
2157 break;
2158 msleep(1);
2159 }
2160 }
2161
2162 if (ret != 1)
2163 DRM_DEBUG_KMS("failed to %s sink power state\n",
2164 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2165 }
2166
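/*
 * Read back whether the port is enabled and, if so, which pipe feeds
 * it. On CPT PCHs the port<->pipe mapping lives in the transcoder's
 * TRANS_DP_CTL port select field, hence the search over the pipes
 * below.
 */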
2167 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2168 enum pipe *pipe)
2169 {
2170 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2171 enum port port = dp_to_dig_port(intel_dp)->port;
2172 struct drm_device *dev = encoder->base.dev;
2173 struct drm_i915_private *dev_priv = dev->dev_private;
2174 enum intel_display_power_domain power_domain;
2175 u32 tmp;
2176
2177 power_domain = intel_display_port_power_domain(encoder);
2178 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2179 return false;
2180
2181 tmp = I915_READ(intel_dp->output_reg);
2182
2183 if (!(tmp & DP_PORT_EN))
2184 return false;
2185
2186 if (IS_GEN7(dev) && port == PORT_A) {
2187 *pipe = PORT_TO_PIPE_CPT(tmp);
2188 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2189 u32 trans_sel;
2190 u32 trans_dp;
2191 int i;
2192
2193 switch (intel_dp->output_reg) {
2194 case PCH_DP_B:
2195 trans_sel = TRANS_DP_PORT_SEL_B;
2196 break;
2197 case PCH_DP_C:
2198 trans_sel = TRANS_DP_PORT_SEL_C;
2199 break;
2200 case PCH_DP_D:
2201 trans_sel = TRANS_DP_PORT_SEL_D;
2202 break;
2203 default:
2204 return true;
2205 }
2206
2207 for_each_pipe(dev_priv, i) {
2208 trans_dp = I915_READ(TRANS_DP_CTL(i));
2209 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2210 *pipe = i;
2211 return true;
2212 }
2213 }
2214
2215 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2216 intel_dp->output_reg);
2217 } else if (IS_CHERRYVIEW(dev)) {
2218 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2219 } else {
2220 *pipe = PORT_TO_PIPE(tmp);
2221 }
2222
2223 return true;
2224 }
2225
2226 static void intel_dp_get_config(struct intel_encoder *encoder,
2227 struct intel_crtc_state *pipe_config)
2228 {
2229 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2230 u32 tmp, flags = 0;
2231 struct drm_device *dev = encoder->base.dev;
2232 struct drm_i915_private *dev_priv = dev->dev_private;
2233 enum port port = dp_to_dig_port(intel_dp)->port;
2234 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2235 int dotclock;
2236
2237 tmp = I915_READ(intel_dp->output_reg);
2238
2239 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2240
2241 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2242 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2243 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2244 flags |= DRM_MODE_FLAG_PHSYNC;
2245 else
2246 flags |= DRM_MODE_FLAG_NHSYNC;
2247
2248 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2249 flags |= DRM_MODE_FLAG_PVSYNC;
2250 else
2251 flags |= DRM_MODE_FLAG_NVSYNC;
2252 } else {
2253 if (tmp & DP_SYNC_HS_HIGH)
2254 flags |= DRM_MODE_FLAG_PHSYNC;
2255 else
2256 flags |= DRM_MODE_FLAG_NHSYNC;
2257
2258 if (tmp & DP_SYNC_VS_HIGH)
2259 flags |= DRM_MODE_FLAG_PVSYNC;
2260 else
2261 flags |= DRM_MODE_FLAG_NVSYNC;
2262 }
2263
2264 pipe_config->base.adjusted_mode.flags |= flags;
2265
2266 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2267 tmp & DP_COLOR_RANGE_16_235)
2268 pipe_config->limited_color_range = true;
2269
2270 pipe_config->has_dp_encoder = true;
2271
2272 intel_dp_get_m_n(crtc, pipe_config);
2273
2274 if (port == PORT_A) {
2275 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2276 pipe_config->port_clock = 162000;
2277 else
2278 pipe_config->port_clock = 270000;
2279 }
2280
2281 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2282 &pipe_config->dp_m_n);
2283
2284 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2285 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2286
2287 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2288
2289 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2290 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2291 /*
2292 * This is a big fat ugly hack.
2293 *
2294 * Some machines in UEFI boot mode provide us a VBT that has 18
2295 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2296 * unknown we fail to light up. Yet the same BIOS boots up with
2297 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2298 * max, not what it tells us to use.
2299 *
2300 * Note: This will still be broken if the eDP panel is not lit
2301 * up by the BIOS, and thus we can't get the mode at module
2302 * load.
2303 */
2304 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2305 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2306 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2307 }
2308 }
2309
2310 static void intel_disable_dp(struct intel_encoder *encoder)
2311 {
2312 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2313 struct drm_device *dev = encoder->base.dev;
2314 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2315
2316 if (crtc->config->has_audio)
2317 intel_audio_codec_disable(encoder);
2318
2319 if (HAS_PSR(dev) && !HAS_DDI(dev))
2320 intel_psr_disable(intel_dp);
2321
2322 /* Make sure the panel is off before trying to change the mode. But also
2323 * ensure that we have vdd while we switch off the panel. */
2324 intel_edp_panel_vdd_on(intel_dp);
2325 intel_edp_backlight_off(intel_dp);
2326 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2327 intel_edp_panel_off(intel_dp);
2328
2329 /* disable the port before the pipe on g4x */
2330 if (INTEL_INFO(dev)->gen < 5)
2331 intel_dp_link_down(intel_dp);
2332 }
2333
2334 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2335 {
2336 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2337 enum port port = dp_to_dig_port(intel_dp)->port;
2338
2339 intel_dp_link_down(intel_dp);
2340 if (port == PORT_A)
2341 ironlake_edp_pll_off(intel_dp);
2342 }
2343
2344 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2345 {
2346 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2347
2348 intel_dp_link_down(intel_dp);
2349 }
2350
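/*
 * On CHV, after taking the link down we also assert the per-lane soft
 * resets in the PCS registers, leaving the data lanes in reset until
 * the next enable sequence deasserts them again.
 */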
2351 static void chv_post_disable_dp(struct intel_encoder *encoder)
2352 {
2353 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2354 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2355 struct drm_device *dev = encoder->base.dev;
2356 struct drm_i915_private *dev_priv = dev->dev_private;
2357 struct intel_crtc *intel_crtc =
2358 to_intel_crtc(encoder->base.crtc);
2359 enum dpio_channel ch = vlv_dport_to_channel(dport);
2360 enum pipe pipe = intel_crtc->pipe;
2361 u32 val;
2362
2363 intel_dp_link_down(intel_dp);
2364
2365 mutex_lock(&dev_priv->dpio_lock);
2366
2367 /* Propagate soft reset to data lane reset */
2368 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2369 val |= CHV_PCS_REQ_SOFTRESET_EN;
2370 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2371
2372 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2373 val |= CHV_PCS_REQ_SOFTRESET_EN;
2374 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2375
2376 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2377 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2378 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2379
2380 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2381 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2382 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2383
2384 mutex_unlock(&dev_priv->dpio_lock);
2385 }
2386
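/*
 * Program the requested training pattern into the source. Three
 * register layouts exist: DDI platforms use DP_TP_CTL, gen7 port A and
 * CPT PCH ports use the _CPT link training fields in the port
 * register, and everything else uses the legacy bits (with a CHV
 * specific mask that also covers training pattern 3).
 */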
2387 static void
2388 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2389 uint32_t *DP,
2390 uint8_t dp_train_pat)
2391 {
2392 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2393 struct drm_device *dev = intel_dig_port->base.base.dev;
2394 struct drm_i915_private *dev_priv = dev->dev_private;
2395 enum port port = intel_dig_port->port;
2396
2397 if (HAS_DDI(dev)) {
2398 uint32_t temp = I915_READ(DP_TP_CTL(port));
2399
2400 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2401 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2402 else
2403 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2404
2405 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2406 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2407 case DP_TRAINING_PATTERN_DISABLE:
2408 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2409
2410 break;
2411 case DP_TRAINING_PATTERN_1:
2412 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2413 break;
2414 case DP_TRAINING_PATTERN_2:
2415 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2416 break;
2417 case DP_TRAINING_PATTERN_3:
2418 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2419 break;
2420 }
2421 I915_WRITE(DP_TP_CTL(port), temp);
2422
2423 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2424 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2425 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2426
2427 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2428 case DP_TRAINING_PATTERN_DISABLE:
2429 *DP |= DP_LINK_TRAIN_OFF_CPT;
2430 break;
2431 case DP_TRAINING_PATTERN_1:
2432 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2433 break;
2434 case DP_TRAINING_PATTERN_2:
2435 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2436 break;
2437 case DP_TRAINING_PATTERN_3:
2438 DRM_ERROR("DP training pattern 3 not supported\n");
2439 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2440 break;
2441 }
2442
2443 } else {
2444 if (IS_CHERRYVIEW(dev))
2445 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2446 else
2447 *DP &= ~DP_LINK_TRAIN_MASK;
2448
2449 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2450 case DP_TRAINING_PATTERN_DISABLE:
2451 *DP |= DP_LINK_TRAIN_OFF;
2452 break;
2453 case DP_TRAINING_PATTERN_1:
2454 *DP |= DP_LINK_TRAIN_PAT_1;
2455 break;
2456 case DP_TRAINING_PATTERN_2:
2457 *DP |= DP_LINK_TRAIN_PAT_2;
2458 break;
2459 case DP_TRAINING_PATTERN_3:
2460 if (IS_CHERRYVIEW(dev)) {
2461 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2462 } else {
2463 DRM_ERROR("DP training pattern 3 not supported\n");
2464 *DP |= DP_LINK_TRAIN_PAT_2;
2465 }
2466 break;
2467 }
2468 }
2469 }
2470
2471 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2472 {
2473 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2474 struct drm_i915_private *dev_priv = dev->dev_private;
2475
2476 /* enable with pattern 1 (as per spec) */
2477 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2478 DP_TRAINING_PATTERN_1);
2479
2480 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2481 POSTING_READ(intel_dp->output_reg);
2482
2483 /*
2484 * Magic for VLV/CHV. We _must_ first set up the register
2485 * without actually enabling the port, and then do another
2486 * write to enable the port. Otherwise link training will
2487 * fail when the power sequencer is freshly used for this port.
2488 */
2489 intel_dp->DP |= DP_PORT_EN;
2490
2491 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2492 POSTING_READ(intel_dp->output_reg);
2493 }
2494
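/*
 * Common DP enable sequence: enable the port with training pattern 1,
 * power up the (e)DP panel under the PPS lock, wake the sink, and then
 * run link training before finally enabling audio.
 */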
2495 static void intel_enable_dp(struct intel_encoder *encoder)
2496 {
2497 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2498 struct drm_device *dev = encoder->base.dev;
2499 struct drm_i915_private *dev_priv = dev->dev_private;
2500 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2501 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2502 unsigned int lane_mask = 0x0;
2503
2504 if (WARN_ON(dp_reg & DP_PORT_EN))
2505 return;
2506
2507 pps_lock(intel_dp);
2508
2509 if (IS_VALLEYVIEW(dev))
2510 vlv_init_panel_power_sequencer(intel_dp);
2511
2512 intel_dp_enable_port(intel_dp);
2513
2514 edp_panel_vdd_on(intel_dp);
2515 edp_panel_on(intel_dp);
2516 edp_panel_vdd_off(intel_dp, true);
2517
2518 pps_unlock(intel_dp);
2519
2520 if (IS_VALLEYVIEW(dev))
2521 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2522 lane_mask);
2523
2524 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2525 intel_dp_start_link_train(intel_dp);
2526 intel_dp_complete_link_train(intel_dp);
2527 intel_dp_stop_link_train(intel_dp);
2528
2529 if (crtc->config->has_audio) {
2530 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2531 pipe_name(crtc->pipe));
2532 intel_audio_codec_enable(encoder);
2533 }
2534 }
2535
2536 static void g4x_enable_dp(struct intel_encoder *encoder)
2537 {
2538 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2539
2540 intel_enable_dp(encoder);
2541 intel_edp_backlight_on(intel_dp);
2542 }
2543
2544 static void vlv_enable_dp(struct intel_encoder *encoder)
2545 {
2546 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2547
2548 intel_edp_backlight_on(intel_dp);
2549 intel_psr_enable(intel_dp);
2550 }
2551
2552 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2553 {
2554 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2555 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2556
2557 intel_dp_prepare(encoder);
2558
2559 /* Only ilk+ has port A */
2560 if (dport->port == PORT_A) {
2561 ironlake_set_pll_cpu_edp(intel_dp);
2562 ironlake_edp_pll_on(intel_dp);
2563 }
2564 }
2565
2566 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2567 {
2568 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2569 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2570 enum pipe pipe = intel_dp->pps_pipe;
2571 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2572
2573 edp_panel_vdd_off_sync(intel_dp);
2574
2575 /*
2576 * VLV seems to get confused when multiple power sequencers
2577 * have the same port selected (even if only one has power/vdd
2578 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2579 * CHV on the other hand doesn't seem to mind having the same port
2580 * selected in multiple power sequencers, but let's always clear the
2581 * port select when logically disconnecting a power sequencer
2582 * from a port.
2583 */
2584 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2585 pipe_name(pipe), port_name(intel_dig_port->port));
2586 I915_WRITE(pp_on_reg, 0);
2587 POSTING_READ(pp_on_reg);
2588
2589 intel_dp->pps_pipe = INVALID_PIPE;
2590 }
2591
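/*
 * VLV/CHV expose only two panel power sequencer instances (for pipes A
 * and B), so before this pipe's sequencer can be used it may have to
 * be taken away from another eDP port that is currently using it.
 */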
2592 static void vlv_steal_power_sequencer(struct drm_device *dev,
2593 enum pipe pipe)
2594 {
2595 struct drm_i915_private *dev_priv = dev->dev_private;
2596 struct intel_encoder *encoder;
2597
2598 lockdep_assert_held(&dev_priv->pps_mutex);
2599
2600 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2601 return;
2602
2603 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2604 base.head) {
2605 struct intel_dp *intel_dp;
2606 enum port port;
2607
2608 if (encoder->type != INTEL_OUTPUT_EDP)
2609 continue;
2610
2611 intel_dp = enc_to_intel_dp(&encoder->base);
2612 port = dp_to_dig_port(intel_dp)->port;
2613
2614 if (intel_dp->pps_pipe != pipe)
2615 continue;
2616
2617 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2618 pipe_name(pipe), port_name(port));
2619
2620 WARN(encoder->connectors_active,
2621 "stealing pipe %c power sequencer from active eDP port %c\n",
2622 pipe_name(pipe), port_name(port));
2623
2624 /* make sure vdd is off before we steal it */
2625 vlv_detach_power_sequencer(intel_dp);
2626 }
2627 }
2628
2629 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2630 {
2631 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2632 struct intel_encoder *encoder = &intel_dig_port->base;
2633 struct drm_device *dev = encoder->base.dev;
2634 struct drm_i915_private *dev_priv = dev->dev_private;
2635 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2636
2637 lockdep_assert_held(&dev_priv->pps_mutex);
2638
2639 if (!is_edp(intel_dp))
2640 return;
2641
2642 if (intel_dp->pps_pipe == crtc->pipe)
2643 return;
2644
2645 /*
2646 * If another power sequencer was being used on this
2647 * port previously, make sure to turn off vdd there while
2648 * we still have control of it.
2649 */
2650 if (intel_dp->pps_pipe != INVALID_PIPE)
2651 vlv_detach_power_sequencer(intel_dp);
2652
2653 /*
2654 * We may be stealing the power
2655 * sequencer from another port.
2656 */
2657 vlv_steal_power_sequencer(dev, crtc->pipe);
2658
2659 /* now it's all ours */
2660 intel_dp->pps_pipe = crtc->pipe;
2661
2662 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2663 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2664
2665 /* init power sequencer on this pipe and port */
2666 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2667 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2668 }
2669
2670 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2671 {
2672 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2673 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2674 struct drm_device *dev = encoder->base.dev;
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2677 enum dpio_channel port = vlv_dport_to_channel(dport);
2678 int pipe = intel_crtc->pipe;
2679 u32 val;
2680
2681 mutex_lock(&dev_priv->dpio_lock);
2682
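/*
 * Note: the DPIO read result is discarded (val is cleared right
 * after), so PCS DW8 is effectively rewritten from scratch rather than
 * read-modify-written.
 */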
2683 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2684 val = 0;
2685 if (pipe)
2686 val |= (1<<21);
2687 else
2688 val &= ~(1<<21);
2689 val |= 0x001000c4;
2690 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2691 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2692 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2693
2694 mutex_unlock(&dev_priv->dpio_lock);
2695
2696 intel_enable_dp(encoder);
2697 }
2698
2699 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2700 {
2701 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2702 struct drm_device *dev = encoder->base.dev;
2703 struct drm_i915_private *dev_priv = dev->dev_private;
2704 struct intel_crtc *intel_crtc =
2705 to_intel_crtc(encoder->base.crtc);
2706 enum dpio_channel port = vlv_dport_to_channel(dport);
2707 int pipe = intel_crtc->pipe;
2708
2709 intel_dp_prepare(encoder);
2710
2711 /* Program Tx lane resets to default */
2712 mutex_lock(&dev_priv->dpio_lock);
2713 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2714 DPIO_PCS_TX_LANE2_RESET |
2715 DPIO_PCS_TX_LANE1_RESET);
2716 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2717 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2718 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2719 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2720 DPIO_PCS_CLK_SOFT_RESET);
2721
2722 /* Fix up inter-pair skew failure */
2723 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2724 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2725 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2726 mutex_unlock(&dev_priv->dpio_lock);
2727 }
2728
2729 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2730 {
2731 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2732 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2733 struct drm_device *dev = encoder->base.dev;
2734 struct drm_i915_private *dev_priv = dev->dev_private;
2735 struct intel_crtc *intel_crtc =
2736 to_intel_crtc(encoder->base.crtc);
2737 enum dpio_channel ch = vlv_dport_to_channel(dport);
2738 int pipe = intel_crtc->pipe;
2739 int data, i, stagger;
2740 u32 val;
2741
2742 mutex_lock(&dev_priv->dpio_lock);
2743
2744 /* allow hardware to manage TX FIFO reset source */
2745 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2746 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2747 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2748
2749 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2750 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2751 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2752
2753 /* Deassert soft data lane reset */
2754 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2755 val |= CHV_PCS_REQ_SOFTRESET_EN;
2756 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2757
2758 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2759 val |= CHV_PCS_REQ_SOFTRESET_EN;
2760 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2761
2762 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2763 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2764 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2765
2766 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2767 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2768 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2769
2770 /* Program optimal Tx lane latency setting */
2771 for (i = 0; i < 4; i++) {
2772 /* Set the upar bit */
2773 data = (i == 1) ? 0x0 : 0x1;
2774 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2775 data << DPIO_UPAR_SHIFT);
2776 }
2777
2778 /* Data lane stagger programming */
2779 if (intel_crtc->config->port_clock > 270000)
2780 stagger = 0x18;
2781 else if (intel_crtc->config->port_clock > 135000)
2782 stagger = 0xd;
2783 else if (intel_crtc->config->port_clock > 67500)
2784 stagger = 0x7;
2785 else if (intel_crtc->config->port_clock > 33750)
2786 stagger = 0x4;
2787 else
2788 stagger = 0x2;
2789
2790 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2791 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2792 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2793
2794 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2795 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2796 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2797
2798 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2799 DPIO_LANESTAGGER_STRAP(stagger) |
2800 DPIO_LANESTAGGER_STRAP_OVRD |
2801 DPIO_TX1_STAGGER_MASK(0x1f) |
2802 DPIO_TX1_STAGGER_MULT(6) |
2803 DPIO_TX2_STAGGER_MULT(0));
2804
2805 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2806 DPIO_LANESTAGGER_STRAP(stagger) |
2807 DPIO_LANESTAGGER_STRAP_OVRD |
2808 DPIO_TX1_STAGGER_MASK(0x1f) |
2809 DPIO_TX1_STAGGER_MULT(7) |
2810 DPIO_TX2_STAGGER_MULT(5));
2811
2812 mutex_unlock(&dev_priv->dpio_lock);
2813
2814 intel_enable_dp(encoder);
2815 }
2816
2817 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2818 {
2819 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2820 struct drm_device *dev = encoder->base.dev;
2821 struct drm_i915_private *dev_priv = dev->dev_private;
2822 struct intel_crtc *intel_crtc =
2823 to_intel_crtc(encoder->base.crtc);
2824 enum dpio_channel ch = vlv_dport_to_channel(dport);
2825 enum pipe pipe = intel_crtc->pipe;
2826 u32 val;
2827
2828 intel_dp_prepare(encoder);
2829
2830 mutex_lock(&dev_priv->dpio_lock);
2831
2832 /* program left/right clock distribution */
2833 if (pipe != PIPE_B) {
2834 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2835 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2836 if (ch == DPIO_CH0)
2837 val |= CHV_BUFLEFTENA1_FORCE;
2838 if (ch == DPIO_CH1)
2839 val |= CHV_BUFRIGHTENA1_FORCE;
2840 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2841 } else {
2842 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2843 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2844 if (ch == DPIO_CH0)
2845 val |= CHV_BUFLEFTENA2_FORCE;
2846 if (ch == DPIO_CH1)
2847 val |= CHV_BUFRIGHTENA2_FORCE;
2848 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2849 }
2850
2851 /* program clock channel usage */
2852 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2853 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2854 if (pipe != PIPE_B)
2855 val &= ~CHV_PCS_USEDCLKCHANNEL;
2856 else
2857 val |= CHV_PCS_USEDCLKCHANNEL;
2858 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2859
2860 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2861 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2862 if (pipe != PIPE_B)
2863 val &= ~CHV_PCS_USEDCLKCHANNEL;
2864 else
2865 val |= CHV_PCS_USEDCLKCHANNEL;
2866 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2867
2868 /*
2869 * This is a bit weird since generally CL
2870 * matches the pipe, but here we need to
2871 * pick the CL based on the port.
2872 */
2873 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2874 if (pipe != PIPE_B)
2875 val &= ~CHV_CMN_USEDCLKCHANNEL;
2876 else
2877 val |= CHV_CMN_USEDCLKCHANNEL;
2878 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2879
2880 mutex_unlock(&dev_priv->dpio_lock);
2881 }
2882
2883 /*
2884 * Native read with retry for link status and receiver capability reads for
2885 * cases where the sink may still be asleep.
2886 *
2887 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2888 * supposed to retry 3 times per the spec.
2889 */
2890 static ssize_t
2891 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2892 void *buffer, size_t size)
2893 {
2894 ssize_t ret;
2895 int i;
2896
2897 /*
2898 * Sometimes we just get the same incorrect byte repeated
2899 * over the entire buffer. Doing just one throw-away read
2900 * initially seems to "solve" it.
2901 */
2902 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2903
2904 for (i = 0; i < 3; i++) {
2905 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2906 if (ret == size)
2907 return ret;
2908 msleep(1);
2909 }
2910
2911 return ret;
2912 }
2913
2914 /*
2915 * Fetch AUX CH registers 0x202 - 0x207 which contain
2916 * link status information
2917 */
2918 static bool
2919 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2920 {
2921 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2922 DP_LANE0_1_STATUS,
2923 link_status,
2924 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2925 }
2926
2927 /* These are source-specific values. */
2928 static uint8_t
2929 intel_dp_voltage_max(struct intel_dp *intel_dp)
2930 {
2931 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2932 struct drm_i915_private *dev_priv = dev->dev_private;
2933 enum port port = dp_to_dig_port(intel_dp)->port;
2934
2935 if (IS_BROXTON(dev))
2936 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2937 else if (INTEL_INFO(dev)->gen >= 9) {
2938 if (dev_priv->edp_low_vswing && port == PORT_A)
2939 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2940 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2941 } else if (IS_VALLEYVIEW(dev))
2942 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2943 else if (IS_GEN7(dev) && port == PORT_A)
2944 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2945 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2946 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2947 else
2948 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2949 }
2950
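/*
 * Maximum pre-emphasis the source supports at a given voltage swing.
 * Per the DP spec the allowed pre-emphasis level shrinks as the swing
 * grows, since the combined peak amplitude is bounded; the tables
 * below additionally encode per-platform buffer limitations.
 */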
2951 static uint8_t
2952 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2953 {
2954 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2955 enum port port = dp_to_dig_port(intel_dp)->port;
2956
2957 if (INTEL_INFO(dev)->gen >= 9) {
2958 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2959 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2960 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2961 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2962 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2964 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2965 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2966 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2967 default:
2968 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2969 }
2970 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2971 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2972 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2973 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2974 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2975 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2977 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2978 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2979 default:
2980 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2981 }
2982 } else if (IS_VALLEYVIEW(dev)) {
2983 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2985 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2986 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2987 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2989 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2990 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2991 default:
2992 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2993 }
2994 } else if (IS_GEN7(dev) && port == PORT_A) {
2995 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2996 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2997 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2998 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2999 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3000 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3001 default:
3002 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3003 }
3004 } else {
3005 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3007 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3009 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3011 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3013 default:
3014 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3015 }
3016 }
3017 }
3018
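/*
 * Translate train_set into VLV DPIO PHY register values. The demph and
 * uniqtranscale constants below are opaque tuning values, presumably
 * taken from the PHY programming documentation.
 */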
3019 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3020 {
3021 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3022 struct drm_i915_private *dev_priv = dev->dev_private;
3023 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3024 struct intel_crtc *intel_crtc =
3025 to_intel_crtc(dport->base.base.crtc);
3026 unsigned long demph_reg_value, preemph_reg_value,
3027 uniqtranscale_reg_value;
3028 uint8_t train_set = intel_dp->train_set[0];
3029 enum dpio_channel port = vlv_dport_to_channel(dport);
3030 int pipe = intel_crtc->pipe;
3031
3032 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3033 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3034 preemph_reg_value = 0x0004000;
3035 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3037 demph_reg_value = 0x2B405555;
3038 uniqtranscale_reg_value = 0x552AB83A;
3039 break;
3040 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3041 demph_reg_value = 0x2B404040;
3042 uniqtranscale_reg_value = 0x5548B83A;
3043 break;
3044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3045 demph_reg_value = 0x2B245555;
3046 uniqtranscale_reg_value = 0x5560B83A;
3047 break;
3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3049 demph_reg_value = 0x2B405555;
3050 uniqtranscale_reg_value = 0x5598DA3A;
3051 break;
3052 default:
3053 return 0;
3054 }
3055 break;
3056 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3057 preemph_reg_value = 0x0002000;
3058 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3060 demph_reg_value = 0x2B404040;
3061 uniqtranscale_reg_value = 0x5552B83A;
3062 break;
3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3064 demph_reg_value = 0x2B404848;
3065 uniqtranscale_reg_value = 0x5580B83A;
3066 break;
3067 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3068 demph_reg_value = 0x2B404040;
3069 uniqtranscale_reg_value = 0x55ADDA3A;
3070 break;
3071 default:
3072 return 0;
3073 }
3074 break;
3075 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3076 preemph_reg_value = 0x0000000;
3077 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3079 demph_reg_value = 0x2B305555;
3080 uniqtranscale_reg_value = 0x5570B83A;
3081 break;
3082 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3083 demph_reg_value = 0x2B2B4040;
3084 uniqtranscale_reg_value = 0x55ADDA3A;
3085 break;
3086 default:
3087 return 0;
3088 }
3089 break;
3090 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3091 preemph_reg_value = 0x0006000;
3092 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3093 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3094 demph_reg_value = 0x1B405555;
3095 uniqtranscale_reg_value = 0x55ADDA3A;
3096 break;
3097 default:
3098 return 0;
3099 }
3100 break;
3101 default:
3102 return 0;
3103 }
3104
3105 mutex_lock(&dev_priv->dpio_lock);
3106 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3107 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3108 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3109 uniqtranscale_reg_value);
3110 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3111 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3112 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3113 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3114 mutex_unlock(&dev_priv->dpio_lock);
3115
3116 return 0;
3117 }
3118
3119 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3120 {
3121 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3122 struct drm_i915_private *dev_priv = dev->dev_private;
3123 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3124 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3125 u32 deemph_reg_value, margin_reg_value, val;
3126 uint8_t train_set = intel_dp->train_set[0];
3127 enum dpio_channel ch = vlv_dport_to_channel(dport);
3128 enum pipe pipe = intel_crtc->pipe;
3129 int i;
3130
3131 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3132 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3133 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3135 deemph_reg_value = 128;
3136 margin_reg_value = 52;
3137 break;
3138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3139 deemph_reg_value = 128;
3140 margin_reg_value = 77;
3141 break;
3142 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3143 deemph_reg_value = 128;
3144 margin_reg_value = 102;
3145 break;
3146 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3147 deemph_reg_value = 128;
3148 margin_reg_value = 154;
3149 /* FIXME extra to set for 1200 */
3150 break;
3151 default:
3152 return 0;
3153 }
3154 break;
3155 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3156 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3157 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3158 deemph_reg_value = 85;
3159 margin_reg_value = 78;
3160 break;
3161 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3162 deemph_reg_value = 85;
3163 margin_reg_value = 116;
3164 break;
3165 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3166 deemph_reg_value = 85;
3167 margin_reg_value = 154;
3168 break;
3169 default:
3170 return 0;
3171 }
3172 break;
3173 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3174 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3175 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3176 deemph_reg_value = 64;
3177 margin_reg_value = 104;
3178 break;
3179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3180 deemph_reg_value = 64;
3181 margin_reg_value = 154;
3182 break;
3183 default:
3184 return 0;
3185 }
3186 break;
3187 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3188 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3189 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3190 deemph_reg_value = 43;
3191 margin_reg_value = 154;
3192 break;
3193 default:
3194 return 0;
3195 }
3196 break;
3197 default:
3198 return 0;
3199 }
3200
3201 mutex_lock(&dev_priv->dpio_lock);
3202
3203 /* Clear calc init */
3204 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3205 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3206 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3207 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3208 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3209
3210 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3211 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3212 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3213 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3214 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3215
3216 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3217 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3218 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3219 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3220
3221 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3222 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3223 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3224 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3225
3226 /* Program swing deemph */
3227 for (i = 0; i < 4; i++) {
3228 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3229 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3230 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3231 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3232 }
3233
3234 /* Program swing margin */
3235 for (i = 0; i < 4; i++) {
3236 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3237 val &= ~DPIO_SWING_MARGIN000_MASK;
3238 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3239 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3240 }
3241
3242 /* Disable unique transition scale */
3243 for (i = 0; i < 4; i++) {
3244 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3245 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3246 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3247 }
3248
3249 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3250 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3251 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3252 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3253
3254 /*
3255 * The document says to set bit 27 for ch0 and bit 26 for ch1,
3256 * which might be a typo in the doc.
3257 * For now, for this unique transition scale selection, set bit
3258 * 27 for both ch0 and ch1.
3259 */
3260 for (i = 0; i < 4; i++) {
3261 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3262 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3263 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3264 }
3265
3266 for (i = 0; i < 4; i++) {
3267 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3268 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3269 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3270 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3271 }
3272 }
3273
3274 /* Start swing calculation */
3275 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3276 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3277 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3278
3279 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3280 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3281 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3282
3283 /* LRC Bypass */
3284 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3285 val |= DPIO_LRC_BYPASS;
3286 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3287
3288 mutex_unlock(&dev_priv->dpio_lock);
3289
3290 return 0;
3291 }
3292
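/*
 * Compute the next train_set from the sink's adjust requests: take the
 * highest voltage swing and pre-emphasis requested across the lanes,
 * clamp them to what this source supports, and set the MAX_*_REACHED
 * flags so the sink knows when we cannot go any higher.
 */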
3293 static void
3294 intel_get_adjust_train(struct intel_dp *intel_dp,
3295 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3296 {
3297 uint8_t v = 0;
3298 uint8_t p = 0;
3299 int lane;
3300 uint8_t voltage_max;
3301 uint8_t preemph_max;
3302
3303 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3304 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3305 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3306
3307 if (this_v > v)
3308 v = this_v;
3309 if (this_p > p)
3310 p = this_p;
3311 }
3312
3313 voltage_max = intel_dp_voltage_max(intel_dp);
3314 if (v >= voltage_max)
3315 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3316
3317 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3318 if (p >= preemph_max)
3319 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3320
3321 for (lane = 0; lane < 4; lane++)
3322 intel_dp->train_set[lane] = v | p;
3323 }
3324
3325 static uint32_t
3326 gen4_signal_levels(uint8_t train_set)
3327 {
3328 uint32_t signal_levels = 0;
3329
3330 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3332 default:
3333 signal_levels |= DP_VOLTAGE_0_4;
3334 break;
3335 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3336 signal_levels |= DP_VOLTAGE_0_6;
3337 break;
3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3339 signal_levels |= DP_VOLTAGE_0_8;
3340 break;
3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3342 signal_levels |= DP_VOLTAGE_1_2;
3343 break;
3344 }
3345 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3346 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3347 default:
3348 signal_levels |= DP_PRE_EMPHASIS_0;
3349 break;
3350 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3351 signal_levels |= DP_PRE_EMPHASIS_3_5;
3352 break;
3353 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3354 signal_levels |= DP_PRE_EMPHASIS_6;
3355 break;
3356 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3357 signal_levels |= DP_PRE_EMPHASIS_9_5;
3358 break;
3359 }
3360 return signal_levels;
3361 }
3362
3363 /* Gen6's DP voltage swing and pre-emphasis control */
3364 static uint32_t
3365 gen6_edp_signal_levels(uint8_t train_set)
3366 {
3367 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3368 DP_TRAIN_PRE_EMPHASIS_MASK);
3369 switch (signal_levels) {
3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3372 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3374 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3377 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3380 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3383 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3384 default:
3385 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3386 "0x%x\n", signal_levels);
3387 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3388 }
3389 }
3390
3391 /* Gen7's DP voltage swing and pre-emphasis control */
3392 static uint32_t
3393 gen7_edp_signal_levels(uint8_t train_set)
3394 {
3395 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3396 DP_TRAIN_PRE_EMPHASIS_MASK);
3397 switch (signal_levels) {
3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3399 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3401 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3402 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3403 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3404
3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3406 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3407 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3408 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3409
3410 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3411 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3412 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3413 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3414
3415 default:
3416 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3417 "0x%x\n", signal_levels);
3418 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3419 }
3420 }
3421
3422 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
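/*
 * Unlike the older platforms, the value returned here is not a raw
 * swing/emphasis setting but an index (DDI_BUF_TRANS_SELECT) into the
 * DDI buffer translation tables programmed separately in intel_ddi.c.
 */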
3423 static uint32_t
3424 hsw_signal_levels(uint8_t train_set)
3425 {
3426 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3427 DP_TRAIN_PRE_EMPHASIS_MASK);
3428 switch (signal_levels) {
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3430 return DDI_BUF_TRANS_SELECT(0);
3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3432 return DDI_BUF_TRANS_SELECT(1);
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3434 return DDI_BUF_TRANS_SELECT(2);
3435 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3436 return DDI_BUF_TRANS_SELECT(3);
3437
3438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3439 return DDI_BUF_TRANS_SELECT(4);
3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3441 return DDI_BUF_TRANS_SELECT(5);
3442 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3443 return DDI_BUF_TRANS_SELECT(6);
3444
3445 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3446 return DDI_BUF_TRANS_SELECT(7);
3447 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3448 return DDI_BUF_TRANS_SELECT(8);
3449
3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3451 return DDI_BUF_TRANS_SELECT(9);
3452 default:
3453 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3454 "0x%x\n", signal_levels);
3455 return DDI_BUF_TRANS_SELECT(0);
3456 }
3457 }
3458
3459 static void bxt_signal_levels(struct intel_dp *intel_dp)
3460 {
3461 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3462 enum port port = dport->port;
3463 struct drm_device *dev = dport->base.base.dev;
3464 struct intel_encoder *encoder = &dport->base;
3465 uint8_t train_set = intel_dp->train_set[0];
3466 uint32_t level = 0;
3467
3468 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3469 DP_TRAIN_PRE_EMPHASIS_MASK);
3470 switch (signal_levels) {
3471 default:
3472 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3473 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3474 level = 0;
3475 break;
3476 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3477 level = 1;
3478 break;
3479 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3480 level = 2;
3481 break;
3482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3483 level = 3;
3484 break;
3485 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3486 level = 4;
3487 break;
3488 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3489 level = 5;
3490 break;
3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3492 level = 6;
3493 break;
3494 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3495 level = 7;
3496 break;
3497 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3498 level = 8;
3499 break;
3500 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3501 level = 9;
3502 break;
3503 }
3504
3505 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3506 }
3507
3508 /* Properly updates "DP" with the correct signal levels. */
3509 static void
3510 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3511 {
3512 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3513 enum port port = intel_dig_port->port;
3514 struct drm_device *dev = intel_dig_port->base.base.dev;
3515 uint32_t signal_levels, mask;
3516 uint8_t train_set = intel_dp->train_set[0];
3517
3518 if (IS_BROXTON(dev)) {
3519 signal_levels = 0;
3520 bxt_signal_levels(intel_dp);
3521 mask = 0;
3522 } else if (HAS_DDI(dev)) {
3523 signal_levels = hsw_signal_levels(train_set);
3524 mask = DDI_BUF_EMP_MASK;
3525 } else if (IS_CHERRYVIEW(dev)) {
3526 signal_levels = chv_signal_levels(intel_dp);
3527 mask = 0;
3528 } else if (IS_VALLEYVIEW(dev)) {
3529 signal_levels = vlv_signal_levels(intel_dp);
3530 mask = 0;
3531 } else if (IS_GEN7(dev) && port == PORT_A) {
3532 signal_levels = gen7_edp_signal_levels(train_set);
3533 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3534 } else if (IS_GEN6(dev) && port == PORT_A) {
3535 signal_levels = gen6_edp_signal_levels(train_set);
3536 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3537 } else {
3538 signal_levels = gen4_signal_levels(train_set);
3539 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3540 }
3541
3542 if (mask)
3543 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3544
3545 DRM_DEBUG_KMS("Using vswing level %d\n",
3546 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3547 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3548 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3549 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3550
3551 *DP = (*DP & ~mask) | signal_levels;
3552 }
3553
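/*
 * Set the training pattern on both the source and the sink. The
 * DP_TRAINING_LANEx_SET registers sit directly after
 * DP_TRAINING_PATTERN_SET in the DPCD address space, which is why the
 * pattern and the per-lane drive settings can be sent in a single AUX
 * write below.
 */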
3554 static bool
3555 intel_dp_set_link_train(struct intel_dp *intel_dp,
3556 uint32_t *DP,
3557 uint8_t dp_train_pat)
3558 {
3559 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3560 struct drm_device *dev = intel_dig_port->base.base.dev;
3561 struct drm_i915_private *dev_priv = dev->dev_private;
3562 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3563 int ret, len;
3564
3565 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3566
3567 I915_WRITE(intel_dp->output_reg, *DP);
3568 POSTING_READ(intel_dp->output_reg);
3569
3570 buf[0] = dp_train_pat;
3571 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3572 DP_TRAINING_PATTERN_DISABLE) {
3573 /* don't write DP_TRAINING_LANEx_SET on disable */
3574 len = 1;
3575 } else {
3576 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3577 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3578 len = intel_dp->lane_count + 1;
3579 }
3580
3581 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3582 buf, len);
3583
3584 return ret == len;
3585 }
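/*
 * Illustrative sketch of the DPCD write above for a hypothetical 4-lane
 * request: the pattern byte and the per-lane drive settings are contiguous
 * in the sink's DPCD, so a single burst write suffices.
 *
 *	buf[0] = DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE;
 *	memcpy(buf + 1, intel_dp->train_set, 4); // vswing/pre-emph per lane
 *	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, buf, 5);
 *	// lands in DPCD 0x102 (pattern) and 0x103..0x106 (lanes 0..3)
 */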
3586
3587 static bool
3588 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3589 uint8_t dp_train_pat)
3590 {
3591 if (!intel_dp->train_set_valid)
3592 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3593 intel_dp_set_signal_levels(intel_dp, DP);
3594 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3595 }
3596
3597 static bool
3598 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3599 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3600 {
3601 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3602 struct drm_device *dev = intel_dig_port->base.base.dev;
3603 struct drm_i915_private *dev_priv = dev->dev_private;
3604 int ret;
3605
3606 intel_get_adjust_train(intel_dp, link_status);
3607 intel_dp_set_signal_levels(intel_dp, DP);
3608
3609 I915_WRITE(intel_dp->output_reg, *DP);
3610 POSTING_READ(intel_dp->output_reg);
3611
3612 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3613 intel_dp->train_set, intel_dp->lane_count);
3614
3615 return ret == intel_dp->lane_count;
3616 }
3617
3618 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3619 {
3620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 struct drm_device *dev = intel_dig_port->base.base.dev;
3622 struct drm_i915_private *dev_priv = dev->dev_private;
3623 enum port port = intel_dig_port->port;
3624 uint32_t val;
3625
3626 if (!HAS_DDI(dev))
3627 return;
3628
3629 val = I915_READ(DP_TP_CTL(port));
3630 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3631 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3632 I915_WRITE(DP_TP_CTL(port), val);
3633
3634 /*
3635 * On PORT_A we can have only eDP in SST mode. There the only reason
3636 * we need to set idle transmission mode is to work around a HW issue
3637 * where we enable the pipe while not in idle link-training mode.
3638 * In this case there is a requirement to wait for a minimum number of
3639 * idle patterns to be sent.
3640 */
3641 if (port == PORT_A)
3642 return;
3643
3644 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3645 1))
3646 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3647 }
3648
3649 /* Enable corresponding port and start training pattern 1 */
3650 void
3651 intel_dp_start_link_train(struct intel_dp *intel_dp)
3652 {
3653 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3654 struct drm_device *dev = encoder->dev;
3655 int i;
3656 uint8_t voltage;
3657 int voltage_tries, loop_tries;
3658 uint32_t DP = intel_dp->DP;
3659 uint8_t link_config[2];
3660
3661 if (HAS_DDI(dev))
3662 intel_ddi_prepare_link_retrain(encoder);
3663
3664 /* Write the link configuration data */
3665 link_config[0] = intel_dp->link_bw;
3666 link_config[1] = intel_dp->lane_count;
3667 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3668 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3669 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3670 if (intel_dp->num_sink_rates)
3671 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3672 &intel_dp->rate_select, 1);
3673
3674 link_config[0] = 0;
3675 link_config[1] = DP_SET_ANSI_8B10B;
3676 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3677
3678 DP |= DP_PORT_EN;
3679
3680 /* clock recovery */
3681 if (!intel_dp_reset_link_train(intel_dp, &DP,
3682 DP_TRAINING_PATTERN_1 |
3683 DP_LINK_SCRAMBLING_DISABLE)) {
3684 DRM_ERROR("failed to enable link training\n");
3685 return;
3686 }
3687
3688 voltage = 0xff;
3689 voltage_tries = 0;
3690 loop_tries = 0;
3691 for (;;) {
3692 uint8_t link_status[DP_LINK_STATUS_SIZE];
3693
3694 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3695 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3696 DRM_ERROR("failed to get link status\n");
3697 break;
3698 }
3699
3700 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3701 DRM_DEBUG_KMS("clock recovery OK\n");
3702 break;
3703 }
3704
3705 /*
3706 * if we used previously trained voltage and pre-emphasis values
3707 * and we don't get clock recovery, reset link training values
3708 */
3709 if (intel_dp->train_set_valid) {
3710 DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3711 /* clear the flag as we are not reusing the train set */
3712 intel_dp->train_set_valid = false;
3713 if (!intel_dp_reset_link_train(intel_dp, &DP,
3714 DP_TRAINING_PATTERN_1 |
3715 DP_LINK_SCRAMBLING_DISABLE)) {
3716 DRM_ERROR("failed to enable link training\n");
3717 return;
3718 }
3719 continue;
3720 }
3721
3722 /* Check to see if we've tried the max voltage */
3723 for (i = 0; i < intel_dp->lane_count; i++)
3724 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3725 break;
3726 if (i == intel_dp->lane_count) {
3727 ++loop_tries;
3728 if (loop_tries == 5) {
3729 DRM_ERROR("too many full retries, give up\n");
3730 break;
3731 }
3732 intel_dp_reset_link_train(intel_dp, &DP,
3733 DP_TRAINING_PATTERN_1 |
3734 DP_LINK_SCRAMBLING_DISABLE);
3735 voltage_tries = 0;
3736 continue;
3737 }
3738
3739 /* Check to see if we've tried the same voltage 5 times */
3740 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3741 ++voltage_tries;
3742 if (voltage_tries == 5) {
3743 DRM_ERROR("too many voltage retries, give up\n");
3744 break;
3745 }
3746 } else
3747 voltage_tries = 0;
3748 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3749
3750 /* Update training set as requested by target */
3751 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3752 DRM_ERROR("failed to update link training\n");
3753 break;
3754 }
3755 }
3756
3757 intel_dp->DP = DP;
3758 }
3759
3760 void
3761 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3762 {
3763 bool channel_eq = false;
3764 int tries, cr_tries;
3765 uint32_t DP = intel_dp->DP;
3766 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3767
3768 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3769 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3770 training_pattern = DP_TRAINING_PATTERN_3;
3771
3772 /* channel equalization */
3773 if (!intel_dp_set_link_train(intel_dp, &DP,
3774 training_pattern |
3775 DP_LINK_SCRAMBLING_DISABLE)) {
3776 DRM_ERROR("failed to start channel equalization\n");
3777 return;
3778 }
3779
3780 tries = 0;
3781 cr_tries = 0;
3782 channel_eq = false;
3783 for (;;) {
3784 uint8_t link_status[DP_LINK_STATUS_SIZE];
3785
3786 if (cr_tries > 5) {
3787 DRM_ERROR("failed to train DP, aborting\n");
3788 break;
3789 }
3790
3791 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3792 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3793 DRM_ERROR("failed to get link status\n");
3794 break;
3795 }
3796
3797 /* Make sure clock is still ok */
3798 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3799 intel_dp->train_set_valid = false;
3800 intel_dp_start_link_train(intel_dp);
3801 intel_dp_set_link_train(intel_dp, &DP,
3802 training_pattern |
3803 DP_LINK_SCRAMBLING_DISABLE);
3804 cr_tries++;
3805 continue;
3806 }
3807
3808 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3809 channel_eq = true;
3810 break;
3811 }
3812
3813 /* Try 5 times, then try clock recovery if that fails */
3814 if (tries > 5) {
3815 intel_dp->train_set_valid = false;
3816 intel_dp_start_link_train(intel_dp);
3817 intel_dp_set_link_train(intel_dp, &DP,
3818 training_pattern |
3819 DP_LINK_SCRAMBLING_DISABLE);
3820 tries = 0;
3821 cr_tries++;
3822 continue;
3823 }
3824
3825 /* Update training set as requested by target */
3826 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3827 DRM_ERROR("failed to update link training\n");
3828 break;
3829 }
3830 ++tries;
3831 }
3832
3833 intel_dp_set_idle_link_train(intel_dp);
3834
3835 intel_dp->DP = DP;
3836
3837 if (channel_eq) {
3838 intel_dp->train_set_valid = true;
3839 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3840 }
3841 }
3842
3843 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3844 {
3845 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3846 DP_TRAINING_PATTERN_DISABLE);
3847 }
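/*
 * The three entry points above are used together; the retraining paths
 * later in this file do, in effect:
 *
 *	intel_dp_start_link_train(intel_dp);    // clock recovery (TPS1)
 *	intel_dp_complete_link_train(intel_dp); // channel EQ (TPS2/TPS3)
 *	intel_dp_stop_link_train(intel_dp);     // disable training pattern
 */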
3848
3849 static void
3850 intel_dp_link_down(struct intel_dp *intel_dp)
3851 {
3852 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3853 enum port port = intel_dig_port->port;
3854 struct drm_device *dev = intel_dig_port->base.base.dev;
3855 struct drm_i915_private *dev_priv = dev->dev_private;
3856 uint32_t DP = intel_dp->DP;
3857
3858 if (WARN_ON(HAS_DDI(dev)))
3859 return;
3860
3861 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3862 return;
3863
3864 DRM_DEBUG_KMS("\n");
3865
3866 if ((IS_GEN7(dev) && port == PORT_A) ||
3867 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3868 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3869 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3870 } else {
3871 if (IS_CHERRYVIEW(dev))
3872 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3873 else
3874 DP &= ~DP_LINK_TRAIN_MASK;
3875 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3876 }
3877 POSTING_READ(intel_dp->output_reg);
3878
3879 if (HAS_PCH_IBX(dev) &&
3880 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3881 /* Hardware workaround: leaving our transcoder select
3882 * set to transcoder B while it's off will prevent the
3883 * corresponding HDMI output on transcoder A.
3884 *
3885 * Combine this with another hardware workaround:
3886 * transcoder select bit can only be cleared while the
3887 * port is enabled.
3888 */
3889 DP &= ~DP_PIPEB_SELECT;
3890 I915_WRITE(intel_dp->output_reg, DP);
3891 POSTING_READ(intel_dp->output_reg);
3892 }
3893
3894 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3895 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3896 POSTING_READ(intel_dp->output_reg);
3897 msleep(intel_dp->panel_power_down_delay);
3898 }
3899
3900 static bool
3901 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3902 {
3903 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3904 struct drm_device *dev = dig_port->base.base.dev;
3905 struct drm_i915_private *dev_priv = dev->dev_private;
3906 uint8_t rev;
3907
3908 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3909 sizeof(intel_dp->dpcd)) < 0)
3910 return false; /* aux transfer failed */
3911
3912 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3913
3914 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3915 return false; /* DPCD not present */
3916
3917 /* Check if the panel supports PSR */
3918 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3919 if (is_edp(intel_dp)) {
3920 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3921 intel_dp->psr_dpcd,
3922 sizeof(intel_dp->psr_dpcd));
3923 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3924 dev_priv->psr.sink_support = true;
3925 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3926 }
3927
3928 if (INTEL_INFO(dev)->gen >= 9 &&
3929 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3930 uint8_t frame_sync_cap;
3931
3932 dev_priv->psr.sink_support = true;
3933 intel_dp_dpcd_read_wake(&intel_dp->aux,
3934 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3935 &frame_sync_cap, 1);
3936 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3937 /* PSR2 needs frame sync as well */
3938 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3939 DRM_DEBUG_KMS("PSR2 %s on sink\n",
3940 dev_priv->psr.psr2_support ? "supported" : "not supported");
3941 }
3942 }
3943
3944 /* Training Pattern 3 support, both source and sink */
3945 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3946 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3947 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3948 intel_dp->use_tps3 = true;
3949 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3950 } else
3951 intel_dp->use_tps3 = false;
3952
3953 /* Intermediate frequency support */
3954 if (is_edp(intel_dp) &&
3955 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3956 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3957 (rev >= 0x03)) { /* eDP 1.4 or higher */
3958 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3959 int i;
3960
3961 intel_dp_dpcd_read_wake(&intel_dp->aux,
3962 DP_SUPPORTED_LINK_RATES,
3963 sink_rates,
3964 sizeof(sink_rates));
3965
3966 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3967 int val = le16_to_cpu(sink_rates[i]);
3968
3969 if (val == 0)
3970 break;
3971
3972 /* DPCD value is in 200 kHz units; drm stores rates in deca-kHz */
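/* e.g. 8100 (0x1fa4) * 200 kHz = 1.62 GHz, stored as 162000 deca-kHz */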
3973 intel_dp->sink_rates[i] = (val * 200) / 10;
3974 }
3975 intel_dp->num_sink_rates = i;
3976 }
3977
3978 intel_dp_print_rates(intel_dp);
3979
3980 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3981 DP_DWN_STRM_PORT_PRESENT))
3982 return true; /* native DP sink */
3983
3984 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3985 return true; /* no per-port downstream info */
3986
3987 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3988 intel_dp->downstream_ports,
3989 DP_MAX_DOWNSTREAM_PORTS) < 0)
3990 return false; /* downstream port status fetch failed */
3991
3992 return true;
3993 }
3994
3995 static void
3996 intel_dp_probe_oui(struct intel_dp *intel_dp)
3997 {
3998 u8 buf[3];
3999
4000 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4001 return;
4002
4003 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4004 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4005 buf[0], buf[1], buf[2]);
4006
4007 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4008 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4009 buf[0], buf[1], buf[2]);
4010 }
4011
4012 static bool
4013 intel_dp_probe_mst(struct intel_dp *intel_dp)
4014 {
4015 u8 buf[1];
4016
4017 if (!intel_dp->can_mst)
4018 return false;
4019
4020 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4021 return false;
4022
4023 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
4024 if (buf[0] & DP_MST_CAP) {
4025 DRM_DEBUG_KMS("Sink is MST capable\n");
4026 intel_dp->is_mst = true;
4027 } else {
4028 DRM_DEBUG_KMS("Sink is not MST capable\n");
4029 intel_dp->is_mst = false;
4030 }
4031 }
4032
4033 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4034 return intel_dp->is_mst;
4035 }
4036
4037 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4038 {
4039 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4040 struct drm_device *dev = intel_dig_port->base.base.dev;
4041 struct intel_crtc *intel_crtc =
4042 to_intel_crtc(intel_dig_port->base.base.crtc);
4043 u8 buf;
4044 int test_crc_count;
4045 int attempts = 6;
4046
4047 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4048 return -EIO;
4049
4050 if (!(buf & DP_TEST_CRC_SUPPORTED))
4051 return -ENOTTY;
4052
4053 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4054 return -EIO;
4055
4056 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4057 buf | DP_TEST_SINK_START) < 0)
4058 return -EIO;
4059
4060 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4061 return -EIO;
4062 test_crc_count = buf & DP_TEST_COUNT_MASK;
4063
4064 do {
4065 if (drm_dp_dpcd_readb(&intel_dp->aux,
4066 DP_TEST_SINK_MISC, &buf) < 0)
4067 return -EIO;
4068 intel_wait_for_vblank(dev, intel_crtc->pipe);
4069 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4070
4071 if (attempts == 0) {
4072 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4073 return -ETIMEDOUT;
4074 }
4075
4076 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4077 return -EIO;
4078
4079 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4080 return -EIO;
4081 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4082 buf & ~DP_TEST_SINK_START) < 0)
4083 return -EIO;
4084
4085 return 0;
4086 }
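/*
 * Hypothetical caller sketch (e.g. a debugfs dump); the six returned
 * bytes are the sink's R/Cr, G/Y and B/Cb CRCs, two bytes each:
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %6phN\n", crc);
 */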
4087
4088 static bool
4089 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4090 {
4091 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4092 DP_DEVICE_SERVICE_IRQ_VECTOR,
4093 sink_irq_vector, 1) == 1;
4094 }
4095
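/*
 * The ESI (event status indicator) block is 14 bytes starting at
 * DP_SINK_COUNT_ESI (DPCD 0x2002); it mirrors the sink count, IRQ
 * vector and lane status so MST sinks can be polled in one read.
 */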
4096 static bool
4097 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4098 {
4099 int ret;
4100
4101 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4102 DP_SINK_COUNT_ESI,
4103 sink_irq_vector, 14);
4104 if (ret != 14)
4105 return false;
4106
4107 return true;
4108 }
4109
4110 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4111 {
4112 uint8_t test_result = DP_TEST_ACK;
4113 return test_result;
4114 }
4115
4116 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4117 {
4118 uint8_t test_result = DP_TEST_NAK;
4119 return test_result;
4120 }
4121
4122 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4123 {
4124 uint8_t test_result = DP_TEST_NAK;
4125 struct intel_connector *intel_connector = intel_dp->attached_connector;
4126 struct drm_connector *connector = &intel_connector->base;
4127
4128 if (intel_connector->detect_edid == NULL ||
4129 connector->edid_corrupt ||
4130 intel_dp->aux.i2c_defer_count > 6) {
4131 /* Check EDID read for NACKs, DEFERs and corruption
4132 * (DP CTS 1.2 Core r1.1)
4133 * 4.2.2.4 : Failed EDID read, I2C_NAK
4134 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4135 * 4.2.2.6 : EDID corruption detected
4136 * Use failsafe mode for all cases
4137 */
4138 if (intel_dp->aux.i2c_nack_count > 0 ||
4139 intel_dp->aux.i2c_defer_count > 0)
4140 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4141 intel_dp->aux.i2c_nack_count,
4142 intel_dp->aux.i2c_defer_count);
4143 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4144 } else {
4145 if (!drm_dp_dpcd_write(&intel_dp->aux,
4146 DP_TEST_EDID_CHECKSUM,
4147 &intel_connector->detect_edid->checksum,
4148 1))
4149 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4150
4151 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4152 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4153 }
4154
4155 /* Set test active flag here so userspace doesn't interrupt things */
4156 intel_dp->compliance_test_active = 1;
4157
4158 return test_result;
4159 }
4160
4161 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4162 {
4163 uint8_t test_result = DP_TEST_NAK;
4164 return test_result;
4165 }
4166
4167 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4168 {
4169 uint8_t response = DP_TEST_NAK;
4170 uint8_t rxdata = 0;
4171 int status = 0;
4172
4173 intel_dp->compliance_test_active = 0;
4174 intel_dp->compliance_test_type = 0;
4175 intel_dp->compliance_test_data = 0;
4176
4177 intel_dp->aux.i2c_nack_count = 0;
4178 intel_dp->aux.i2c_defer_count = 0;
4179
4180 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4181 if (status <= 0) {
4182 DRM_DEBUG_KMS("Could not read test request from sink\n");
4183 goto update_status;
4184 }
4185
4186 switch (rxdata) {
4187 case DP_TEST_LINK_TRAINING:
4188 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4189 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4190 response = intel_dp_autotest_link_training(intel_dp);
4191 break;
4192 case DP_TEST_LINK_VIDEO_PATTERN:
4193 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4194 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4195 response = intel_dp_autotest_video_pattern(intel_dp);
4196 break;
4197 case DP_TEST_LINK_EDID_READ:
4198 DRM_DEBUG_KMS("EDID test requested\n");
4199 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4200 response = intel_dp_autotest_edid(intel_dp);
4201 break;
4202 case DP_TEST_LINK_PHY_TEST_PATTERN:
4203 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4204 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4205 response = intel_dp_autotest_phy_pattern(intel_dp);
4206 break;
4207 default:
4208 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4209 break;
4210 }
4211
4212 update_status:
4213 status = drm_dp_dpcd_write(&intel_dp->aux,
4214 DP_TEST_RESPONSE,
4215 &response, 1);
4216 if (status <= 0)
4217 DRM_DEBUG_KMS("Could not write test response to sink\n");
4218 }
4219
4220 static int
4221 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4222 {
4223 bool bret;
4224
4225 if (intel_dp->is_mst) {
4226 u8 esi[16] = { 0 };
4227 int ret = 0;
4228 int retry;
4229 bool handled;
4230 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4231 go_again:
4232 if (bret) {
4233
4234 /* check link status - the ESI lane status copy starts at esi[10] (DPCD 0x200c) */
4235 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4236 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4237 intel_dp_start_link_train(intel_dp);
4238 intel_dp_complete_link_train(intel_dp);
4239 intel_dp_stop_link_train(intel_dp);
4240 }
4241
4242 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4243 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4244
4245 if (handled) {
4246 for (retry = 0; retry < 3; retry++) {
4247 int wret;
4248 wret = drm_dp_dpcd_write(&intel_dp->aux,
4249 DP_SINK_COUNT_ESI+1,
4250 &esi[1], 3);
4251 if (wret == 3) {
4252 break;
4253 }
4254 }
4255
4256 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4257 if (bret) {
4258 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4259 goto go_again;
4260 }
4261 } else
4262 ret = 0;
4263
4264 return ret;
4265 } else {
4266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4267 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4268 intel_dp->is_mst = false;
4269 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4270 /* send a hotplug event */
4271 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4272 }
4273 }
4274 return -EINVAL;
4275 }
4276
4277 /*
4278 * According to DP spec
4279 * 5.1.2:
4280 * 1. Read DPCD
4281 * 2. Configure link according to Receiver Capabilities
4282 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4283 * 4. Check link status on receipt of hot-plug interrupt
4284 */
4285 static void
4286 intel_dp_check_link_status(struct intel_dp *intel_dp)
4287 {
4288 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4289 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4290 u8 sink_irq_vector;
4291 u8 link_status[DP_LINK_STATUS_SIZE];
4292
4293 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4294
4295 if (!intel_encoder->connectors_active)
4296 return;
4297
4298 if (WARN_ON(!intel_encoder->base.crtc))
4299 return;
4300
4301 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4302 return;
4303
4304 /* Try to read receiver status if the link appears to be up */
4305 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4306 return;
4307 }
4308
4309 /* Now read the DPCD to see if it's actually running */
4310 if (!intel_dp_get_dpcd(intel_dp)) {
4311 return;
4312 }
4313
4314 /* Try to read the source of the interrupt */
4315 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4316 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4317 /* Clear interrupt source */
4318 drm_dp_dpcd_writeb(&intel_dp->aux,
4319 DP_DEVICE_SERVICE_IRQ_VECTOR,
4320 sink_irq_vector);
4321
4322 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4323 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4324 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4325 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4326 }
4327
4328 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4329 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4330 intel_encoder->base.name);
4331 intel_dp_start_link_train(intel_dp);
4332 intel_dp_complete_link_train(intel_dp);
4333 intel_dp_stop_link_train(intel_dp);
4334 }
4335 }
4336
4337 /* XXX this is probably wrong for multiple downstream ports */
4338 static enum drm_connector_status
4339 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4340 {
4341 uint8_t *dpcd = intel_dp->dpcd;
4342 uint8_t type;
4343
4344 if (!intel_dp_get_dpcd(intel_dp))
4345 return connector_status_disconnected;
4346
4347 /* if there's no downstream port, we're done */
4348 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4349 return connector_status_connected;
4350
4351 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4352 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4353 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4354 uint8_t reg;
4355
4356 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4357 &reg, 1) < 0)
4358 return connector_status_unknown;
4359
4360 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4361 : connector_status_disconnected;
4362 }
4363
4364 /* If no HPD, poke DDC gently */
4365 if (drm_probe_ddc(&intel_dp->aux.ddc))
4366 return connector_status_connected;
4367
4368 /* Well we tried, say unknown for unreliable port types */
4369 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4370 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4371 if (type == DP_DS_PORT_TYPE_VGA ||
4372 type == DP_DS_PORT_TYPE_NON_EDID)
4373 return connector_status_unknown;
4374 } else {
4375 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4376 DP_DWN_STRM_PORT_TYPE_MASK;
4377 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4378 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4379 return connector_status_unknown;
4380 }
4381
4382 /* Anything else is out of spec, warn and ignore */
4383 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4384 return connector_status_disconnected;
4385 }
4386
4387 static enum drm_connector_status
4388 edp_detect(struct intel_dp *intel_dp)
4389 {
4390 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4391 enum drm_connector_status status;
4392
4393 status = intel_panel_detect(dev);
4394 if (status == connector_status_unknown)
4395 status = connector_status_connected;
4396
4397 return status;
4398 }
4399
4400 static enum drm_connector_status
4401 ironlake_dp_detect(struct intel_dp *intel_dp)
4402 {
4403 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4404 struct drm_i915_private *dev_priv = dev->dev_private;
4405 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4406
4407 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4408 return connector_status_disconnected;
4409
4410 return intel_dp_detect_dpcd(intel_dp);
4411 }
4412
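/*
 * Returns 1 if the port's live status bit is set, 0 if it is clear, and
 * -EINVAL for ports without a live status bit; callers map -EINVAL to
 * connector_status_unknown.
 */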
4413 static int g4x_digital_port_connected(struct drm_device *dev,
4414 struct intel_digital_port *intel_dig_port)
4415 {
4416 struct drm_i915_private *dev_priv = dev->dev_private;
4417 uint32_t bit;
4418
4419 if (IS_VALLEYVIEW(dev)) {
4420 switch (intel_dig_port->port) {
4421 case PORT_B:
4422 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4423 break;
4424 case PORT_C:
4425 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4426 break;
4427 case PORT_D:
4428 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4429 break;
4430 default:
4431 return -EINVAL;
4432 }
4433 } else {
4434 switch (intel_dig_port->port) {
4435 case PORT_B:
4436 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4437 break;
4438 case PORT_C:
4439 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4440 break;
4441 case PORT_D:
4442 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4443 break;
4444 default:
4445 return -EINVAL;
4446 }
4447 }
4448
4449 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4450 return 0;
4451 return 1;
4452 }
4453
4454 static enum drm_connector_status
4455 g4x_dp_detect(struct intel_dp *intel_dp)
4456 {
4457 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4458 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4459 int ret;
4460
4461 /* Can't disconnect eDP, but you can close the lid... */
4462 if (is_edp(intel_dp)) {
4463 enum drm_connector_status status;
4464
4465 status = intel_panel_detect(dev);
4466 if (status == connector_status_unknown)
4467 status = connector_status_connected;
4468 return status;
4469 }
4470
4471 ret = g4x_digital_port_connected(dev, intel_dig_port);
4472 if (ret == -EINVAL)
4473 return connector_status_unknown;
4474 else if (ret == 0)
4475 return connector_status_disconnected;
4476
4477 return intel_dp_detect_dpcd(intel_dp);
4478 }
4479
4480 static struct edid *
4481 intel_dp_get_edid(struct intel_dp *intel_dp)
4482 {
4483 struct intel_connector *intel_connector = intel_dp->attached_connector;
4484
4485 /* use cached edid if we have one */
4486 if (intel_connector->edid) {
4487 /* invalid edid */
4488 if (IS_ERR(intel_connector->edid))
4489 return NULL;
4490
4491 return drm_edid_duplicate(intel_connector->edid);
4492 } else
4493 return drm_get_edid(&intel_connector->base,
4494 &intel_dp->aux.ddc);
4495 }
4496
4497 static void
4498 intel_dp_set_edid(struct intel_dp *intel_dp)
4499 {
4500 struct intel_connector *intel_connector = intel_dp->attached_connector;
4501 struct edid *edid;
4502
4503 edid = intel_dp_get_edid(intel_dp);
4504 intel_connector->detect_edid = edid;
4505
4506 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4507 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4508 else
4509 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4510 }
4511
4512 static void
4513 intel_dp_unset_edid(struct intel_dp *intel_dp)
4514 {
4515 struct intel_connector *intel_connector = intel_dp->attached_connector;
4516
4517 kfree(intel_connector->detect_edid);
4518 intel_connector->detect_edid = NULL;
4519
4520 intel_dp->has_audio = false;
4521 }
4522
4523 static enum intel_display_power_domain
4524 intel_dp_power_get(struct intel_dp *dp)
4525 {
4526 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4527 enum intel_display_power_domain power_domain;
4528
4529 power_domain = intel_display_port_power_domain(encoder);
4530 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4531
4532 return power_domain;
4533 }
4534
4535 static void
4536 intel_dp_power_put(struct intel_dp *dp,
4537 enum intel_display_power_domain power_domain)
4538 {
4539 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4540 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4541 }
4542
4543 static enum drm_connector_status
4544 intel_dp_detect(struct drm_connector *connector, bool force)
4545 {
4546 struct intel_dp *intel_dp = intel_attached_dp(connector);
4547 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4548 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4549 struct drm_device *dev = connector->dev;
4550 enum drm_connector_status status;
4551 enum intel_display_power_domain power_domain;
4552 bool ret;
4553 u8 sink_irq_vector;
4554
4555 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4556 connector->base.id, connector->name);
4557 intel_dp_unset_edid(intel_dp);
4558
4559 if (intel_dp->is_mst) {
4560 /* MST devices are disconnected from a monitor POV */
4561 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4562 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4563 return connector_status_disconnected;
4564 }
4565
4566 power_domain = intel_dp_power_get(intel_dp);
4567
4568 /* Can't disconnect eDP, but you can close the lid... */
4569 if (is_edp(intel_dp))
4570 status = edp_detect(intel_dp);
4571 else if (HAS_PCH_SPLIT(dev))
4572 status = ironlake_dp_detect(intel_dp);
4573 else
4574 status = g4x_dp_detect(intel_dp);
4575 if (status != connector_status_connected)
4576 goto out;
4577
4578 intel_dp_probe_oui(intel_dp);
4579
4580 ret = intel_dp_probe_mst(intel_dp);
4581 if (ret) {
4582 /* If we are in MST mode then this connector won't appear
4583 * connected or have anything with EDID on it. */
4584 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4585 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4586 status = connector_status_disconnected;
4587 goto out;
4588 }
4589
4590 intel_dp_set_edid(intel_dp);
4591
4592 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4593 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4594 status = connector_status_connected;
4595
4596 /* Try to read the source of the interrupt */
4597 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4598 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4599 /* Clear interrupt source */
4600 drm_dp_dpcd_writeb(&intel_dp->aux,
4601 DP_DEVICE_SERVICE_IRQ_VECTOR,
4602 sink_irq_vector);
4603
4604 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4605 intel_dp_handle_test_request(intel_dp);
4606 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4607 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4608 }
4609
4610 out:
4611 intel_dp_power_put(intel_dp, power_domain);
4612 return status;
4613 }
4614
4615 static void
4616 intel_dp_force(struct drm_connector *connector)
4617 {
4618 struct intel_dp *intel_dp = intel_attached_dp(connector);
4619 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4620 enum intel_display_power_domain power_domain;
4621
4622 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4623 connector->base.id, connector->name);
4624 intel_dp_unset_edid(intel_dp);
4625
4626 if (connector->status != connector_status_connected)
4627 return;
4628
4629 power_domain = intel_dp_power_get(intel_dp);
4630
4631 intel_dp_set_edid(intel_dp);
4632
4633 intel_dp_power_put(intel_dp, power_domain);
4634
4635 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4636 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4637 }
4638
4639 static int intel_dp_get_modes(struct drm_connector *connector)
4640 {
4641 struct intel_connector *intel_connector = to_intel_connector(connector);
4642 struct edid *edid;
4643
4644 edid = intel_connector->detect_edid;
4645 if (edid) {
4646 int ret = intel_connector_update_modes(connector, edid);
4647 if (ret)
4648 return ret;
4649 }
4650
4651 /* if eDP has no EDID, fall back to fixed mode */
4652 if (is_edp(intel_attached_dp(connector)) &&
4653 intel_connector->panel.fixed_mode) {
4654 struct drm_display_mode *mode;
4655
4656 mode = drm_mode_duplicate(connector->dev,
4657 intel_connector->panel.fixed_mode);
4658 if (mode) {
4659 drm_mode_probed_add(connector, mode);
4660 return 1;
4661 }
4662 }
4663
4664 return 0;
4665 }
4666
4667 static bool
4668 intel_dp_detect_audio(struct drm_connector *connector)
4669 {
4670 bool has_audio = false;
4671 struct edid *edid;
4672
4673 edid = to_intel_connector(connector)->detect_edid;
4674 if (edid)
4675 has_audio = drm_detect_monitor_audio(edid);
4676
4677 return has_audio;
4678 }
4679
4680 static int
4681 intel_dp_set_property(struct drm_connector *connector,
4682 struct drm_property *property,
4683 uint64_t val)
4684 {
4685 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4686 struct intel_connector *intel_connector = to_intel_connector(connector);
4687 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4688 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4689 int ret;
4690
4691 ret = drm_object_property_set_value(&connector->base, property, val);
4692 if (ret)
4693 return ret;
4694
4695 if (property == dev_priv->force_audio_property) {
4696 int i = val;
4697 bool has_audio;
4698
4699 if (i == intel_dp->force_audio)
4700 return 0;
4701
4702 intel_dp->force_audio = i;
4703
4704 if (i == HDMI_AUDIO_AUTO)
4705 has_audio = intel_dp_detect_audio(connector);
4706 else
4707 has_audio = (i == HDMI_AUDIO_ON);
4708
4709 if (has_audio == intel_dp->has_audio)
4710 return 0;
4711
4712 intel_dp->has_audio = has_audio;
4713 goto done;
4714 }
4715
4716 if (property == dev_priv->broadcast_rgb_property) {
4717 bool old_auto = intel_dp->color_range_auto;
4718 uint32_t old_range = intel_dp->color_range;
4719
4720 switch (val) {
4721 case INTEL_BROADCAST_RGB_AUTO:
4722 intel_dp->color_range_auto = true;
4723 break;
4724 case INTEL_BROADCAST_RGB_FULL:
4725 intel_dp->color_range_auto = false;
4726 intel_dp->color_range = 0;
4727 break;
4728 case INTEL_BROADCAST_RGB_LIMITED:
4729 intel_dp->color_range_auto = false;
4730 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4731 break;
4732 default:
4733 return -EINVAL;
4734 }
4735
4736 if (old_auto == intel_dp->color_range_auto &&
4737 old_range == intel_dp->color_range)
4738 return 0;
4739
4740 goto done;
4741 }
4742
4743 if (is_edp(intel_dp) &&
4744 property == connector->dev->mode_config.scaling_mode_property) {
4745 if (val == DRM_MODE_SCALE_NONE) {
4746 DRM_DEBUG_KMS("scaling mode NONE not supported on eDP\n");
4747 return -EINVAL;
4748 }
4749
4750 if (intel_connector->panel.fitting_mode == val) {
4751 /* the eDP scaling property is not changed */
4752 return 0;
4753 }
4754 intel_connector->panel.fitting_mode = val;
4755
4756 goto done;
4757 }
4758
4759 return -EINVAL;
4760
4761 done:
4762 if (intel_encoder->base.crtc)
4763 intel_crtc_restore_mode(intel_encoder->base.crtc);
4764
4765 return 0;
4766 }
4767
4768 static void
4769 intel_dp_connector_destroy(struct drm_connector *connector)
4770 {
4771 struct intel_connector *intel_connector = to_intel_connector(connector);
4772
4773 kfree(intel_connector->detect_edid);
4774
4775 if (!IS_ERR_OR_NULL(intel_connector->edid))
4776 kfree(intel_connector->edid);
4777
4778 /* Can't call is_edp() since the encoder may have been destroyed
4779 * already. */
4780 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4781 intel_panel_fini(&intel_connector->panel);
4782
4783 drm_connector_cleanup(connector);
4784 kfree(connector);
4785 }
4786
4787 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4788 {
4789 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4790 struct intel_dp *intel_dp = &intel_dig_port->dp;
4791
4792 drm_dp_aux_unregister(&intel_dp->aux);
4793 intel_dp_mst_encoder_cleanup(intel_dig_port);
4794 if (is_edp(intel_dp)) {
4795 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4796 /*
4797 * vdd might still be enabled due to the delayed vdd off.
4798 * Make sure vdd is actually turned off here.
4799 */
4800 pps_lock(intel_dp);
4801 edp_panel_vdd_off_sync(intel_dp);
4802 pps_unlock(intel_dp);
4803
4804 if (intel_dp->edp_notifier.notifier_call) {
4805 unregister_reboot_notifier(&intel_dp->edp_notifier);
4806 intel_dp->edp_notifier.notifier_call = NULL;
4807 }
4808 }
4809 drm_encoder_cleanup(encoder);
4810 kfree(intel_dig_port);
4811 }
4812
4813 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4814 {
4815 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4816
4817 if (!is_edp(intel_dp))
4818 return;
4819
4820 /*
4821 * vdd might still be enabled due to the delayed vdd off.
4822 * Make sure vdd is actually turned off here.
4823 */
4824 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4825 pps_lock(intel_dp);
4826 edp_panel_vdd_off_sync(intel_dp);
4827 pps_unlock(intel_dp);
4828 }
4829
4830 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4831 {
4832 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4833 struct drm_device *dev = intel_dig_port->base.base.dev;
4834 struct drm_i915_private *dev_priv = dev->dev_private;
4835 enum intel_display_power_domain power_domain;
4836
4837 lockdep_assert_held(&dev_priv->pps_mutex);
4838
4839 if (!edp_have_panel_vdd(intel_dp))
4840 return;
4841
4842 /*
4843 * The VDD bit needs a power domain reference, so if the bit is
4844 * already enabled when we boot or resume, grab this reference and
4845 * schedule a vdd off, so we don't hold on to the reference
4846 * indefinitely.
4847 */
4848 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4849 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4850 intel_display_power_get(dev_priv, power_domain);
4851
4852 edp_panel_vdd_schedule_off(intel_dp);
4853 }
4854
4855 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4856 {
4857 struct intel_dp *intel_dp;
4858
4859 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4860 return;
4861
4862 intel_dp = enc_to_intel_dp(encoder);
4863
4864 pps_lock(intel_dp);
4865
4866 /*
4867 * Read out the current power sequencer assignment,
4868 * in case the BIOS did something with it.
4869 */
4870 if (IS_VALLEYVIEW(encoder->dev))
4871 vlv_initial_power_sequencer_setup(intel_dp);
4872
4873 intel_edp_panel_vdd_sanitize(intel_dp);
4874
4875 pps_unlock(intel_dp);
4876 }
4877
4878 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4879 .dpms = intel_connector_dpms,
4880 .detect = intel_dp_detect,
4881 .force = intel_dp_force,
4882 .fill_modes = drm_helper_probe_single_connector_modes,
4883 .set_property = intel_dp_set_property,
4884 .atomic_get_property = intel_connector_atomic_get_property,
4885 .destroy = intel_dp_connector_destroy,
4886 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4887 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4888 };
4889
4890 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4891 .get_modes = intel_dp_get_modes,
4892 .mode_valid = intel_dp_mode_valid,
4893 .best_encoder = intel_best_encoder,
4894 };
4895
4896 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4897 .reset = intel_dp_encoder_reset,
4898 .destroy = intel_dp_encoder_destroy,
4899 };
4900
4901 void
4902 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4903 {
4904 return;
4905 }
4906
4907 enum irqreturn
4908 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4909 {
4910 struct intel_dp *intel_dp = &intel_dig_port->dp;
4911 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4912 struct drm_device *dev = intel_dig_port->base.base.dev;
4913 struct drm_i915_private *dev_priv = dev->dev_private;
4914 enum intel_display_power_domain power_domain;
4915 enum irqreturn ret = IRQ_NONE;
4916
4917 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4918 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4919
4920 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4921 /*
4922 * vdd off can generate a long pulse on eDP which
4923 * would require vdd on to handle it, and thus we
4924 * would end up in an endless cycle of
4925 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4926 */
4927 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4928 port_name(intel_dig_port->port));
4929 return IRQ_HANDLED;
4930 }
4931
4932 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4933 port_name(intel_dig_port->port),
4934 long_hpd ? "long" : "short");
4935
4936 power_domain = intel_display_port_power_domain(intel_encoder);
4937 intel_display_power_get(dev_priv, power_domain);
4938
4939 if (long_hpd) {
4940 /* indicate that we need to restart link training */
4941 intel_dp->train_set_valid = false;
4942
4943 if (HAS_PCH_SPLIT(dev)) {
4944 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4945 goto mst_fail;
4946 } else {
4947 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4948 goto mst_fail;
4949 }
4950
4951 if (!intel_dp_get_dpcd(intel_dp)) {
4952 goto mst_fail;
4953 }
4954
4955 intel_dp_probe_oui(intel_dp);
4956
4957 if (!intel_dp_probe_mst(intel_dp))
4958 goto mst_fail;
4959
4960 } else {
4961 if (intel_dp->is_mst) {
4962 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4963 goto mst_fail;
4964 }
4965
4966 if (!intel_dp->is_mst) {
4967 /*
4968 * we'll check the link status via the normal hot plug path later -
4969 * but for short hpds we should check it now
4970 */
4971 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4972 intel_dp_check_link_status(intel_dp);
4973 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4974 }
4975 }
4976
4977 ret = IRQ_HANDLED;
4978
4979 goto put_power;
4980 mst_fail:
4981 /* if we were in MST mode, and the device is not there, get out of MST mode */
4982 if (intel_dp->is_mst) {
4983 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4984 intel_dp->is_mst = false;
4985 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4986 }
4987 put_power:
4988 intel_display_power_put(dev_priv, power_domain);
4989
4990 return ret;
4991 }
4992
4993 /* Return which DP Port should be selected for Transcoder DP control */
4994 int
4995 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4996 {
4997 struct drm_device *dev = crtc->dev;
4998 struct intel_encoder *intel_encoder;
4999 struct intel_dp *intel_dp;
5000
5001 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5002 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5003
5004 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5005 intel_encoder->type == INTEL_OUTPUT_EDP)
5006 return intel_dp->output_reg;
5007 }
5008
5009 return -1;
5010 }
5011
5012 /* check the VBT to see whether the given port is used for eDP */
5013 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5014 {
5015 struct drm_i915_private *dev_priv = dev->dev_private;
5016 union child_device_config *p_child;
5017 int i;
5018 static const short port_mapping[] = {
5019 [PORT_B] = PORT_IDPB,
5020 [PORT_C] = PORT_IDPC,
5021 [PORT_D] = PORT_IDPD,
5022 };
5023
5024 if (port == PORT_A)
5025 return true;
5026
5027 if (!dev_priv->vbt.child_dev_num)
5028 return false;
5029
5030 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5031 p_child = dev_priv->vbt.child_dev + i;
5032
5033 if (p_child->common.dvo_port == port_mapping[port] &&
5034 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5035 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5036 return true;
5037 }
5038 return false;
5039 }
5040
5041 void
5042 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5043 {
5044 struct intel_connector *intel_connector = to_intel_connector(connector);
5045
5046 intel_attach_force_audio_property(connector);
5047 intel_attach_broadcast_rgb_property(connector);
5048 intel_dp->color_range_auto = true;
5049
5050 if (is_edp(intel_dp)) {
5051 drm_mode_create_scaling_mode_property(connector->dev);
5052 drm_object_attach_property(
5053 &connector->base,
5054 connector->dev->mode_config.scaling_mode_property,
5055 DRM_MODE_SCALE_ASPECT);
5056 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5057 }
5058 }
5059
5060 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5061 {
5062 intel_dp->last_power_cycle = jiffies;
5063 intel_dp->last_power_on = jiffies;
5064 intel_dp->last_backlight_off = jiffies;
5065 }
5066
5067 static void
5068 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5069 struct intel_dp *intel_dp)
5070 {
5071 struct drm_i915_private *dev_priv = dev->dev_private;
5072 struct edp_power_seq cur, vbt, spec,
5073 *final = &intel_dp->pps_delays;
5074 u32 pp_on, pp_off, pp_div, pp;
5075 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5076
5077 lockdep_assert_held(&dev_priv->pps_mutex);
5078
5079 /* already initialized? */
5080 if (final->t11_t12 != 0)
5081 return;
5082
5083 if (HAS_PCH_SPLIT(dev)) {
5084 pp_ctrl_reg = PCH_PP_CONTROL;
5085 pp_on_reg = PCH_PP_ON_DELAYS;
5086 pp_off_reg = PCH_PP_OFF_DELAYS;
5087 pp_div_reg = PCH_PP_DIVISOR;
5088 } else {
5089 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5090
5091 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5092 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5093 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5094 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5095 }
5096
5097 /* Workaround: Need to write PP_CONTROL with the unlock key as
5098 * the very first thing. */
5099 pp = ironlake_get_pp_control(intel_dp);
5100 I915_WRITE(pp_ctrl_reg, pp);
5101
5102 pp_on = I915_READ(pp_on_reg);
5103 pp_off = I915_READ(pp_off_reg);
5104 pp_div = I915_READ(pp_div_reg);
5105
5106 /* Pull timing values out of registers */
5107 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5108 PANEL_POWER_UP_DELAY_SHIFT;
5109
5110 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5111 PANEL_LIGHT_ON_DELAY_SHIFT;
5112
5113 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5114 PANEL_LIGHT_OFF_DELAY_SHIFT;
5115
5116 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5117 PANEL_POWER_DOWN_DELAY_SHIFT;
5118
5119 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5120 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5121
5122 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5123 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5124
5125 vbt = dev_priv->vbt.edp_pps;
5126
5127 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5128 * our hw here, which are all in 100usec. */
5129 spec.t1_t3 = 210 * 10;
5130 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5131 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5132 spec.t10 = 500 * 10;
5133 /* This one is special and actually in units of 100ms, but zero
5134 * based in the hw (so we need to add 100 ms). But the sw vbt
5135 * table multiplies it by 1000 to make it in units of 100usec,
5136 * too. */
5137 spec.t11_t12 = (510 + 100) * 10;
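/* i.e. (510 ms spec limit + 100 ms hw zero-base) * 10 = 6100 in 100usec units */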
5138
5139 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5140 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5141
5142 /* Use the max of the register settings and vbt. If both are
5143 * unset, fall back to the spec limits. */
5144 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5145 spec.field : \
5146 max(cur.field, vbt.field))
5147 assign_final(t1_t3);
5148 assign_final(t8);
5149 assign_final(t9);
5150 assign_final(t10);
5151 assign_final(t11_t12);
5152 #undef assign_final
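/*
 * e.g. with cur.t1_t3 == 0 and vbt.t1_t3 == 0 the spec value 2100 is
 * used; with cur.t1_t3 == 400 and vbt.t1_t3 == 350 the larger register
 * value 400 wins.
 */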
5153
5154 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5155 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5156 intel_dp->backlight_on_delay = get_delay(t8);
5157 intel_dp->backlight_off_delay = get_delay(t9);
5158 intel_dp->panel_power_down_delay = get_delay(t10);
5159 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5160 #undef get_delay
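/* e.g. a final t1_t3 of 2100 (100usec units) becomes a 210 ms delay */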
5161
5162 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5163 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5164 intel_dp->panel_power_cycle_delay);
5165
5166 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5167 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5168 }
5169
5170 static void
5171 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5172 struct intel_dp *intel_dp)
5173 {
5174 struct drm_i915_private *dev_priv = dev->dev_private;
5175 u32 pp_on, pp_off, pp_div, port_sel = 0;
5176 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5177 int pp_on_reg, pp_off_reg, pp_div_reg;
5178 enum port port = dp_to_dig_port(intel_dp)->port;
5179 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5180
5181 lockdep_assert_held(&dev_priv->pps_mutex);
5182
5183 if (HAS_PCH_SPLIT(dev)) {
5184 pp_on_reg = PCH_PP_ON_DELAYS;
5185 pp_off_reg = PCH_PP_OFF_DELAYS;
5186 pp_div_reg = PCH_PP_DIVISOR;
5187 } else {
5188 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5189
5190 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5191 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5192 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5193 }
5194
5195 /*
5196 * And finally store the new values in the power sequencer. The
5197 * backlight delays are set to 1 because we do manual waits on them. For
5198 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5199 * we'll end up waiting for the backlight off delay twice: once when we
5200 * do the manual sleep, and once when we disable the panel and wait for
5201 * the PP_STATUS bit to become zero.
5202 */
5203 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5204 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5205 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5206 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5207 /* Compute the divisor for the pp clock, simply match the Bspec
5208 * formula. */
5209 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5210 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5211 << PANEL_POWER_CYCLE_DELAY_SHIFT);
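/*
 * e.g. t11_t12 == 6100 (610 ms) rounds up to 7 in the 100 ms units of
 * the power cycle delay field.
 */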
5212
5213 /* Haswell doesn't have any port selection bits for the panel
5214 * power sequencer any more. */
5215 if (IS_VALLEYVIEW(dev)) {
5216 port_sel = PANEL_PORT_SELECT_VLV(port);
5217 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5218 if (port == PORT_A)
5219 port_sel = PANEL_PORT_SELECT_DPA;
5220 else
5221 port_sel = PANEL_PORT_SELECT_DPD;
5222 }
5223
5224 pp_on |= port_sel;
5225
5226 I915_WRITE(pp_on_reg, pp_on);
5227 I915_WRITE(pp_off_reg, pp_off);
5228 I915_WRITE(pp_div_reg, pp_div);
5229
5230 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5231 I915_READ(pp_on_reg),
5232 I915_READ(pp_off_reg),
5233 I915_READ(pp_div_reg));
5234 }
5235
5236 /**
5237 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5238 * @dev: DRM device
5239 * @refresh_rate: RR to be programmed
5240 *
5241 * This function gets called when the refresh rate (RR) has to be changed
5242 * from one frequency to another. Switches can be between the high and low
5243 * RR supported by the panel or to any other RR based on media playback (in
5244 * that case, the RR value needs to be passed from user space).
5245 *
5246 * The caller of this function needs to take a lock on dev_priv->drrs.
5247 */
5248 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5249 {
5250 struct drm_i915_private *dev_priv = dev->dev_private;
5251 struct intel_encoder *encoder;
5252 struct intel_digital_port *dig_port = NULL;
5253 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5254 struct intel_crtc_state *config = NULL;
5255 struct intel_crtc *intel_crtc = NULL;
5256 u32 reg, val;
5257 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5258
5259 if (refresh_rate <= 0) {
5260 DRM_DEBUG_KMS("Refresh rate must be positive\n");
5261 return;
5262 }
5263
5264 if (intel_dp == NULL) {
5265 DRM_DEBUG_KMS("DRRS not supported.\n");
5266 return;
5267 }
5268
5269 /*
5270 * FIXME: This needs proper synchronization with psr state for some
5271 * platforms that cannot have PSR and DRRS enabled at the same time.
5272 */
5273
5274 dig_port = dp_to_dig_port(intel_dp);
5275 encoder = &dig_port->base;
5276 intel_crtc = to_intel_crtc(encoder->base.crtc);
5277
5278 if (!intel_crtc) {
5279 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5280 return;
5281 }
5282
5283 config = intel_crtc->config;
5284
5285 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5286 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5287 return;
5288 }
5289
5290 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5291 refresh_rate)
5292 index = DRRS_LOW_RR;
5293
5294 if (index == dev_priv->drrs.refresh_rate_type) {
5295 DRM_DEBUG_KMS(
5296 "DRRS requested for previously set RR...ignoring\n");
5297 return;
5298 }
5299
5300 if (!intel_crtc->active) {
5301 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5302 return;
5303 }
5304
5305 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5306 switch (index) {
5307 case DRRS_HIGH_RR:
5308 intel_dp_set_m_n(intel_crtc, M1_N1);
5309 break;
5310 case DRRS_LOW_RR:
5311 intel_dp_set_m_n(intel_crtc, M2_N2);
5312 break;
5313 case DRRS_MAX_RR:
5314 default:
5315 DRM_ERROR("Unsupported refresh rate type\n");
5316 }
5317 } else if (INTEL_INFO(dev)->gen > 6) {
5318 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5319 val = I915_READ(reg);
5320
5321 if (index > DRRS_HIGH_RR) {
5322 if (IS_VALLEYVIEW(dev))
5323 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5324 else
5325 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5326 } else {
5327 if (IS_VALLEYVIEW(dev))
5328 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5329 else
5330 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5331 }
5332 I915_WRITE(reg, val);
5333 }
5334
5335 dev_priv->drrs.refresh_rate_type = index;
5336
5337 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5338 }
5339
5340 /**
5341 * intel_edp_drrs_enable - init drrs struct if supported
5342 * @intel_dp: DP struct
5343 *
5344 * Initializes frontbuffer_bits and drrs.dp
5345 */
5346 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5347 {
5348 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5349 struct drm_i915_private *dev_priv = dev->dev_private;
5350 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5351 struct drm_crtc *crtc = dig_port->base.base.crtc;
5352 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5353
5354 if (!intel_crtc->config->has_drrs) {
5355 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5356 return;
5357 }
5358
5359 mutex_lock(&dev_priv->drrs.mutex);
5360 if (WARN_ON(dev_priv->drrs.dp)) {
5361 DRM_ERROR("DRRS already enabled\n");
5362 goto unlock;
5363 }
5364
5365 dev_priv->drrs.busy_frontbuffer_bits = 0;
5366
5367 dev_priv->drrs.dp = intel_dp;
5368
5369 unlock:
5370 mutex_unlock(&dev_priv->drrs.mutex);
5371 }
5372
5373 /**
5374 * intel_edp_drrs_disable - Disable DRRS
5375 * @intel_dp: DP struct
5376 * Stops DRRS, restoring the high refresh rate if the low one is active.
5377 */
5378 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5379 {
5380 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5381 struct drm_i915_private *dev_priv = dev->dev_private;
5382 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5383 struct drm_crtc *crtc = dig_port->base.base.crtc;
5384 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5385
5386 if (!intel_crtc->config->has_drrs)
5387 return;
5388
5389 mutex_lock(&dev_priv->drrs.mutex);
5390 if (!dev_priv->drrs.dp) {
5391 mutex_unlock(&dev_priv->drrs.mutex);
5392 return;
5393 }
5394
5395 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5396 intel_dp_set_drrs_state(dev_priv->dev,
5397 intel_dp->attached_connector->panel.
5398 fixed_mode->vrefresh);
5399
5400 dev_priv->drrs.dp = NULL;
5401 mutex_unlock(&dev_priv->drrs.mutex);
5402
5403 cancel_delayed_work_sync(&dev_priv->drrs.work);
5404 }
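/*
 * Usage sketch (hypothetical caller, for illustration only): enable and
 * disable are expected to bracket the time the eDP pipe is active, e.g.
 * from the modeset path:
 *
 *	intel_edp_drrs_enable(intel_dp);	// after the pipe is up
 *	...
 *	intel_edp_drrs_disable(intel_dp);	// before the pipe goes down
 */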
5405
5406 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5407 {
5408 struct drm_i915_private *dev_priv =
5409 container_of(work, typeof(*dev_priv), drrs.work.work);
5410 struct intel_dp *intel_dp;
5411
5412 mutex_lock(&dev_priv->drrs.mutex);
5413
5414 intel_dp = dev_priv->drrs.dp;
5415
5416 if (!intel_dp)
5417 goto unlock;
5418
5419 /*
5420 * The delayed work can race with an invalidate, hence we need
5421 * to recheck.
5422 */
5423
5424 if (dev_priv->drrs.busy_frontbuffer_bits)
5425 goto unlock;
5426
5427 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5428 intel_dp_set_drrs_state(dev_priv->dev,
5429 intel_dp->attached_connector->panel.
5430 downclock_mode->vrefresh);
5431
5432 unlock:
5433 mutex_unlock(&dev_priv->drrs.mutex);
5434 }
5435
5436 /**
5437 * intel_edp_drrs_invalidate - Invalidate DRRS
5438 * @dev: DRM device
5439 * @frontbuffer_bits: frontbuffer plane tracking bits
5440 *
5441 * When there is a disturbance on screen (due to cursor movement, time
5442 * updates etc.), DRRS needs to be invalidated, i.e. a switch to the
5443 * high RR is needed.
5444 *
5445 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5446 */
5447 void intel_edp_drrs_invalidate(struct drm_device *dev,
5448 unsigned frontbuffer_bits)
5449 {
5450 struct drm_i915_private *dev_priv = dev->dev_private;
5451 struct drm_crtc *crtc;
5452 enum pipe pipe;
5453
5454 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5455 return;
5456
5457 cancel_delayed_work(&dev_priv->drrs.work);
5458
5459 mutex_lock(&dev_priv->drrs.mutex);
5460 if (!dev_priv->drrs.dp) {
5461 mutex_unlock(&dev_priv->drrs.mutex);
5462 return;
5463 }
5464
5465 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5466 pipe = to_intel_crtc(crtc)->pipe;
5467
5468 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5469 intel_dp_set_drrs_state(dev_priv->dev,
5470 dev_priv->drrs.dp->attached_connector->panel.
5471 fixed_mode->vrefresh);
5472 }
5473
5474 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5475
5476 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5477 mutex_unlock(&dev_priv->drrs.mutex);
5478 }
5479
5480 /**
5481 * intel_edp_drrs_flush - Flush DRRS
5482 * @dev: DRM device
5483 * @frontbuffer_bits: frontbuffer plane tracking bits
5484 *
5485 * When there is no movement on screen, the DRRS work can be scheduled.
5486 * After a timeout of 1 second, this work switches the panel to the low
5487 * refresh rate by programming the relevant registers.
5488 *
5489 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5490 */
5491 void intel_edp_drrs_flush(struct drm_device *dev,
5492 unsigned frontbuffer_bits)
5493 {
5494 struct drm_i915_private *dev_priv = dev->dev_private;
5495 struct drm_crtc *crtc;
5496 enum pipe pipe;
5497
5498 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5499 return;
5500
5501 cancel_delayed_work(&dev_priv->drrs.work);
5502
5503 mutex_lock(&dev_priv->drrs.mutex);
5504 if (!dev_priv->drrs.dp) {
5505 mutex_unlock(&dev_priv->drrs.mutex);
5506 return;
5507 }
5508
5509 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5510 pipe = to_intel_crtc(crtc)->pipe;
5511 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5512
5513 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5514 !dev_priv->drrs.busy_frontbuffer_bits)
5515 schedule_delayed_work(&dev_priv->drrs.work,
5516 msecs_to_jiffies(1000));
5517 mutex_unlock(&dev_priv->drrs.mutex);
5518 }
5519
5520 /**
5521 * DOC: Display Refresh Rate Switching (DRRS)
5522 *
5523 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5524 * which enables switching between low and high refresh rates,
5525 * dynamically, based on the usage scenario. This feature is applicable
5526 * for internal panels.
5527 *
5528 * Indication that the panel supports DRRS is given by the panel EDID, which
5529 * would list multiple refresh rates for one resolution.
5530 *
5531 * DRRS is of 2 types - static and seamless.
5532 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5533 * (may appear as a blink on screen) and is used in dock-undock scenario.
5534 * Seamless DRRS involves changing RR without any visual effect to the user
5535 * and can be used during normal system usage. This is done by programming
5536 * certain registers.
5537 *
5538 * Support for static/seamless DRRS may be indicated in the VBT based on
5539 * inputs from the panel spec.
5540 *
5541 * DRRS saves power by switching to low RR based on usage scenarios.
5542 *
5543 * eDP DRRS:
5544 * The implementation is based on frontbuffer tracking implementation.
5545 * When there is a disturbance on the screen triggered by user activity or a
5546 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5547 * When there is no movement on screen, after a timeout of 1 second, a switch
5548 * to low RR is made.
5549 * For integration with frontbuffer tracking code,
5550 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5551 *
5552 * DRRS can be further extended to support other internal panels and also
5553 * the scenario of video playback wherein RR is set based on the rate
5554 * requested by userspace.
5555 */
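/*
 * Illustrative call sequence (for documentation only; the exact call
 * sites live in the frontbuffer tracking code): rendering activity is
 * bracketed by the two entry points described above:
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... CPU/GPU writes hit the frontbuffer, high RR is in effect ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * Once no busy_frontbuffer_bits remain after a flush, the delayed work
 * switches back to the low refresh rate after the 1 second timeout.
 */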
5556
5557 /**
5558 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5559 * @intel_connector: eDP connector
5560 * @fixed_mode: preferred mode of panel
5561 *
5562 * This function is called only once at driver load to initialize the
5563 * basic DRRS state (delayed work and mutex).
5564 *
5565 * Returns:
5566 * Downclock mode if panel supports it, else return NULL.
5567 * DRRS support is determined by the presence of downclock mode (apart
5568 * from VBT setting).
5569 */
5570 static struct drm_display_mode *
5571 intel_dp_drrs_init(struct intel_connector *intel_connector,
5572 struct drm_display_mode *fixed_mode)
5573 {
5574 struct drm_connector *connector = &intel_connector->base;
5575 struct drm_device *dev = connector->dev;
5576 struct drm_i915_private *dev_priv = dev->dev_private;
5577 struct drm_display_mode *downclock_mode = NULL;
5578
5579 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5580 mutex_init(&dev_priv->drrs.mutex);
5581
5582 if (INTEL_INFO(dev)->gen <= 6) {
5583 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5584 return NULL;
5585 }
5586
5587 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5588 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5589 return NULL;
5590 }
5591
5592 downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5593 connector);
5594
5595 if (!downclock_mode) {
5596 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5597 return NULL;
5598 }
5599
5600 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5601
5602 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5603 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5604 return downclock_mode;
5605 }
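/*
 * Sketch of how the return value is consumed (mirrors
 * intel_edp_init_connector() below): the downclock mode, if any, is
 * stored alongside the fixed mode so DRRS can later pick between the
 * two refresh rates:
 *
 *	downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
 *	...
 *	intel_panel_init(&intel_connector->panel, fixed_mode,
 *			 downclock_mode);
 */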
5606
5607 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5608 struct intel_connector *intel_connector)
5609 {
5610 struct drm_connector *connector = &intel_connector->base;
5611 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5612 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5613 struct drm_device *dev = intel_encoder->base.dev;
5614 struct drm_i915_private *dev_priv = dev->dev_private;
5615 struct drm_display_mode *fixed_mode = NULL;
5616 struct drm_display_mode *downclock_mode = NULL;
5617 bool has_dpcd;
5618 struct drm_display_mode *scan;
5619 struct edid *edid;
5620 enum pipe pipe = INVALID_PIPE;
5621
5622 if (!is_edp(intel_dp))
5623 return true;
5624
5625 pps_lock(intel_dp);
5626 intel_edp_panel_vdd_sanitize(intel_dp);
5627 pps_unlock(intel_dp);
5628
5629 /* Cache DPCD and EDID for edp. */
5630 has_dpcd = intel_dp_get_dpcd(intel_dp);
5631
5632 if (has_dpcd) {
5633 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5634 dev_priv->no_aux_handshake =
5635 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5636 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5637 } else {
5638 /* if this fails, presume the device is a ghost */
5639 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5640 return false;
5641 }
5642
5643 /* We now know it's not a ghost, init power sequence regs. */
5644 pps_lock(intel_dp);
5645 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5646 pps_unlock(intel_dp);
5647
5648 mutex_lock(&dev->mode_config.mutex);
5649 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5650 if (edid) {
5651 if (drm_add_edid_modes(connector, edid)) {
5652 drm_mode_connector_update_edid_property(connector,
5653 edid);
5654 drm_edid_to_eld(connector, edid);
5655 } else {
5656 kfree(edid);
5657 edid = ERR_PTR(-EINVAL);
5658 }
5659 } else {
5660 edid = ERR_PTR(-ENOENT);
5661 }
5662 intel_connector->edid = edid;
5663
5664 /* prefer fixed mode from EDID if available */
5665 list_for_each_entry(scan, &connector->probed_modes, head) {
5666 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5667 fixed_mode = drm_mode_duplicate(dev, scan);
5668 downclock_mode = intel_dp_drrs_init(
5669 intel_connector, fixed_mode);
5670 break;
5671 }
5672 }
5673
5674 /* fallback to VBT if available for eDP */
5675 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5676 fixed_mode = drm_mode_duplicate(dev,
5677 dev_priv->vbt.lfp_lvds_vbt_mode);
5678 if (fixed_mode)
5679 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5680 }
5681 mutex_unlock(&dev->mode_config.mutex);
5682
5683 if (IS_VALLEYVIEW(dev)) {
5684 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5685 register_reboot_notifier(&intel_dp->edp_notifier);
5686
5687 /*
5688 * Figure out the current pipe for the initial backlight setup.
5689 * If the current pipe isn't valid, try the PPS pipe, and if that
5690 * fails just assume pipe A.
5691 */
5692 if (IS_CHERRYVIEW(dev))
5693 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5694 else
5695 pipe = PORT_TO_PIPE(intel_dp->DP);
5696
5697 if (pipe != PIPE_A && pipe != PIPE_B)
5698 pipe = intel_dp->pps_pipe;
5699
5700 if (pipe != PIPE_A && pipe != PIPE_B)
5701 pipe = PIPE_A;
5702
5703 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5704 pipe_name(pipe));
5705 }
5706
5707 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5708 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5709 intel_panel_setup_backlight(connector, pipe);
5710
5711 return true;
5712 }
5713
5714 bool
5715 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5716 struct intel_connector *intel_connector)
5717 {
5718 struct drm_connector *connector = &intel_connector->base;
5719 struct intel_dp *intel_dp = &intel_dig_port->dp;
5720 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5721 struct drm_device *dev = intel_encoder->base.dev;
5722 struct drm_i915_private *dev_priv = dev->dev_private;
5723 enum port port = intel_dig_port->port;
5724 int type;
5725
5726 intel_dp->pps_pipe = INVALID_PIPE;
5727
5728 /* intel_dp vfuncs */
5729 if (INTEL_INFO(dev)->gen >= 9)
5730 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5731 else if (IS_VALLEYVIEW(dev))
5732 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5733 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5734 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5735 else if (HAS_PCH_SPLIT(dev))
5736 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5737 else
5738 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5739
5740 if (INTEL_INFO(dev)->gen >= 9)
5741 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5742 else
5743 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5744
5745 /* Preserve the current hw state. */
5746 intel_dp->DP = I915_READ(intel_dp->output_reg);
5747 intel_dp->attached_connector = intel_connector;
5748
5749 if (intel_dp_is_edp(dev, port))
5750 type = DRM_MODE_CONNECTOR_eDP;
5751 else
5752 type = DRM_MODE_CONNECTOR_DisplayPort;
5753
5754 /*
5755 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5756 * for DP the encoder type can be set by the caller to
5757 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5758 */
5759 if (type == DRM_MODE_CONNECTOR_eDP)
5760 intel_encoder->type = INTEL_OUTPUT_EDP;
5761
5762 /* eDP only on port B and/or C on vlv/chv */
5763 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5764 port != PORT_B && port != PORT_C))
5765 return false;
5766
5767 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5768 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5769 port_name(port));
5770
5771 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5772 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5773
5774 connector->interlace_allowed = true;
5775 connector->doublescan_allowed = 0;
5776
5777 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5778 edp_panel_vdd_work);
5779
5780 intel_connector_attach_encoder(intel_connector, intel_encoder);
5781 drm_connector_register(connector);
5782
5783 if (HAS_DDI(dev))
5784 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5785 else
5786 intel_connector->get_hw_state = intel_connector_get_hw_state;
5787 intel_connector->unregister = intel_dp_connector_unregister;
5788
5789 /* Set up the hotplug pin. */
5790 switch (port) {
5791 case PORT_A:
5792 intel_encoder->hpd_pin = HPD_PORT_A;
5793 break;
5794 case PORT_B:
5795 intel_encoder->hpd_pin = HPD_PORT_B;
5796 break;
5797 case PORT_C:
5798 intel_encoder->hpd_pin = HPD_PORT_C;
5799 break;
5800 case PORT_D:
5801 intel_encoder->hpd_pin = HPD_PORT_D;
5802 break;
5803 default:
5804 BUG();
5805 }
5806
5807 if (is_edp(intel_dp)) {
5808 pps_lock(intel_dp);
5809 intel_dp_init_panel_power_timestamps(intel_dp);
5810 if (IS_VALLEYVIEW(dev))
5811 vlv_initial_power_sequencer_setup(intel_dp);
5812 else
5813 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5814 pps_unlock(intel_dp);
5815 }
5816
5817 intel_dp_aux_init(intel_dp, intel_connector);
5818
5819 /* init MST on ports that can support it */
5820 if (HAS_DP_MST(dev) &&
5821 (port == PORT_B || port == PORT_C || port == PORT_D))
5822 intel_dp_mst_encoder_init(intel_dig_port,
5823 intel_connector->base.base.id);
5824
5825 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5826 drm_dp_aux_unregister(&intel_dp->aux);
5827 if (is_edp(intel_dp)) {
5828 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5829 /*
5830 * vdd might still be enabled due to the delayed vdd off.
5831 * Make sure vdd is actually turned off here.
5832 */
5833 pps_lock(intel_dp);
5834 edp_panel_vdd_off_sync(intel_dp);
5835 pps_unlock(intel_dp);
5836 }
5837 drm_connector_unregister(connector);
5838 drm_connector_cleanup(connector);
5839 return false;
5840 }
5841
5842 intel_dp_add_properties(intel_dp, connector);
5843
5844 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
5845 * written with 0xd. Failure to do so will result in spurious interrupts
5846 * being generated on the port when a cable is not attached.
5847 */
5848 if (IS_G4X(dev) && !IS_GM45(dev)) {
5849 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5850 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5851 }
5852
5853 i915_debugfs_connector_add(connector);
5854
5855 return true;
5856 }
5857
5858 void
5859 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5860 {
5861 struct drm_i915_private *dev_priv = dev->dev_private;
5862 struct intel_digital_port *intel_dig_port;
5863 struct intel_encoder *intel_encoder;
5864 struct drm_encoder *encoder;
5865 struct intel_connector *intel_connector;
5866
5867 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5868 if (!intel_dig_port)
5869 return;
5870
5871 intel_connector = intel_connector_alloc();
5872 if (!intel_connector) {
5873 kfree(intel_dig_port);
5874 return;
5875 }
5876
5877 intel_encoder = &intel_dig_port->base;
5878 encoder = &intel_encoder->base;
5879
5880 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5881 DRM_MODE_ENCODER_TMDS);
5882
5883 intel_encoder->compute_config = intel_dp_compute_config;
5884 intel_encoder->disable = intel_disable_dp;
5885 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5886 intel_encoder->get_config = intel_dp_get_config;
5887 intel_encoder->suspend = intel_dp_encoder_suspend;
5888 if (IS_CHERRYVIEW(dev)) {
5889 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5890 intel_encoder->pre_enable = chv_pre_enable_dp;
5891 intel_encoder->enable = vlv_enable_dp;
5892 intel_encoder->post_disable = chv_post_disable_dp;
5893 } else if (IS_VALLEYVIEW(dev)) {
5894 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5895 intel_encoder->pre_enable = vlv_pre_enable_dp;
5896 intel_encoder->enable = vlv_enable_dp;
5897 intel_encoder->post_disable = vlv_post_disable_dp;
5898 } else {
5899 intel_encoder->pre_enable = g4x_pre_enable_dp;
5900 intel_encoder->enable = g4x_enable_dp;
5901 if (INTEL_INFO(dev)->gen >= 5)
5902 intel_encoder->post_disable = ilk_post_disable_dp;
5903 }
5904
5905 intel_dig_port->port = port;
5906 intel_dig_port->dp.output_reg = output_reg;
5907
5908 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5909 if (IS_CHERRYVIEW(dev)) {
5910 if (port == PORT_D)
5911 intel_encoder->crtc_mask = 1 << 2;
5912 else
5913 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5914 } else {
5915 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5916 }
5917 intel_encoder->cloneable = 0;
5918 intel_encoder->hot_plug = intel_dp_hot_plug;
5919
5920 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5921 dev_priv->hpd_irq_port[port] = intel_dig_port;
5922
5923 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5924 drm_encoder_cleanup(encoder);
5925 kfree(intel_dig_port);
5926 kfree(intel_connector);
5927 }
5928 }
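/*
 * Illustrative platform hookup (hypothetical register/port pairing, for
 * documentation only): display initialization code would register one
 * DP encoder per detected port, along the lines of:
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, DP_B, PORT_B);
 */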
5929
5930 void intel_dp_mst_suspend(struct drm_device *dev)
5931 {
5932 struct drm_i915_private *dev_priv = dev->dev_private;
5933 int i;
5934
5935 /* disable MST */
5936 for (i = 0; i < I915_MAX_PORTS; i++) {
5937 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5938 if (!intel_dig_port)
5939 continue;
5940
5941 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5942 if (!intel_dig_port->dp.can_mst)
5943 continue;
5944 if (intel_dig_port->dp.is_mst)
5945 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5946 }
5947 }
5948 }
5949
5950 void intel_dp_mst_resume(struct drm_device *dev)
5951 {
5952 struct drm_i915_private *dev_priv = dev->dev_private;
5953 int i;
5954
5955 for (i = 0; i < I915_MAX_PORTS; i++) {
5956 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5957 if (!intel_dig_port)
5958 continue;
5959 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5960 int ret;
5961
5962 if (!intel_dig_port->dp.can_mst)
5963 continue;
5964
5965 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5966 if (ret != 0)
5967 intel_dp_check_mst_status(&intel_dig_port->dp);
5969 }
5970 }
5971 }