drm/i915/dp: there is no audio on port A
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44 struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47 };
48
49 static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54 };
55
56 static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61 };
62
63 static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68 };
69
70 /*
71 * CHV supports eDP 1.4, which has more link rates.
72 * Below we provide only the fixed rates and exclude the variable rates.
73 */
74 static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires programming fractional division for m2.
77 * m2 is stored in fixed-point format using the formula below:
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86 };
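
/*
 * Editor's sketch (not part of the original file): decoding the fixed-point
 * m2 value described above. For the 1.62GHz entry, 0x819999a decodes to
 * m2_int = 32 and m2_fraction = 1677722, i.e. m2 = 32 + 1677722 / 2^22.
 */
static inline u32 chv_m2_int(u32 m2)
{
	return m2 >> 22;			/* integer part, bits 31:22 */
}

static inline u32 chv_m2_fraction(u32 m2)
{
	return m2 & ((1 << 22) - 1);		/* fractional part, bits 21:0 */
}
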
87 /* Skylake supports the following rates */
88 static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
91 243000, 270000, 324000, 405000,
92 420000, 432000, 540000 };
93 static const int default_rates[] = { 162000, 270000, 540000 };
94
95 /**
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
98 *
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
101 */
102 static bool is_edp(struct intel_dp *intel_dp)
103 {
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
107 }
108
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
110 {
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113 return intel_dig_port->base.base.dev;
114 }
115
116 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117 {
118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
119 }
120
121 static void intel_dp_link_down(struct intel_dp *intel_dp);
122 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
123 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
124 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
125 static void vlv_steal_power_sequencer(struct drm_device *dev,
126 enum pipe pipe);
127
128 static int
129 intel_dp_max_link_bw(struct intel_dp *intel_dp)
130 {
131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
132
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
135 case DP_LINK_BW_2_7:
136 case DP_LINK_BW_5_4:
137 break;
138 default:
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140 max_link_bw);
141 max_link_bw = DP_LINK_BW_1_62;
142 break;
143 }
144 return max_link_bw;
145 }
146
147 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148 {
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
152
153 source_max = 4;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156 source_max = 2;
157
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160 return min(source_max, sink_max);
161 }
162
163 /*
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
166 *
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168 *
169 * 270000 * 1 * 8 / 10 == 216000
170 *
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
175 *
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
178 */
179
180 static int
181 intel_dp_link_required(int pixel_clock, int bpp)
182 {
183 return (pixel_clock * bpp + 9) / 10;
184 }
185
186 static int
187 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
188 {
189 return (max_link_clock * max_lanes * 8) / 10;
190 }
191
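/*
 * Worked example (editor's addition) for the units described above:
 * 1680x1050R has ->clock == 119000, so at 18bpp the mode needs
 * intel_dp_link_required(119000, 18) == 214200 decakilobits/s, while a
 * single 2.7GHz lane supplies intel_dp_max_data_rate(270000, 1) == 216000,
 * so the mode just fits on one lane.
 */
static inline bool example_1680x1050R_fits_one_lane(void)
{
	return intel_dp_link_required(119000, 18) <=
		intel_dp_max_data_rate(270000, 1);	/* 214200 <= 216000 */
}
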
192 static enum drm_mode_status
193 intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
195 {
196 struct intel_dp *intel_dp = intel_attached_dp(connector);
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
201
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
204 return MODE_PANEL;
205
206 if (mode->vdisplay > fixed_mode->vdisplay)
207 return MODE_PANEL;
208
209 target_clock = fixed_mode->clock;
210 }
211
212 max_link_clock = intel_dp_max_link_rate(intel_dp);
213 max_lanes = intel_dp_max_lane_count(intel_dp);
214
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
217
218 if (mode_rate > max_rate)
219 return MODE_CLOCK_HIGH;
220
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
223
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
226
227 return MODE_OK;
228 }
229
230 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
231 {
232 int i;
233 uint32_t v = 0;
234
235 if (src_bytes > 4)
236 src_bytes = 4;
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
239 return v;
240 }
241
242 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
243 {
244 int i;
245 if (dst_bytes > 4)
246 dst_bytes = 4;
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
249 }
250
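/*
 * Editor's sketch: the AUX data registers hold up to 4 bytes MSB-first, so
 * packing {0x12, 0x34} yields 0x12340000 and unpacking that value recovers
 * the same two bytes.
 */
static inline bool example_aux_pack_roundtrip(void)
{
	const uint8_t src[2] = { 0x12, 0x34 };
	uint8_t dst[2];

	intel_dp_unpack_aux(intel_dp_pack_aux(src, 2), dst, 2);
	return dst[0] == src[0] && dst[1] == src[1];	/* true */
}
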
251 /* hrawclock is 1/4 the FSB frequency */
252 static int
253 intel_hrawclk(struct drm_device *dev)
254 {
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 uint32_t clkcfg;
257
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
260 return 200;
261
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
264 case CLKCFG_FSB_400:
265 return 100;
266 case CLKCFG_FSB_533:
267 return 133;
268 case CLKCFG_FSB_667:
269 return 166;
270 case CLKCFG_FSB_800:
271 return 200;
272 case CLKCFG_FSB_1067:
273 return 266;
274 case CLKCFG_FSB_1333:
275 return 333;
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
279 return 400;
280 default:
281 return 133;
282 }
283 }
284
285 static void
286 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
287 struct intel_dp *intel_dp);
288 static void
289 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290 struct intel_dp *intel_dp);
291
292 static void pps_lock(struct intel_dp *intel_dp)
293 {
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
299
300 /*
301 * See vlv_power_sequencer_reset() for why we need
302 * a power domain reference here.
303 */
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
306
307 mutex_lock(&dev_priv->pps_mutex);
308 }
309
310 static void pps_unlock(struct intel_dp *intel_dp)
311 {
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
317
318 mutex_unlock(&dev_priv->pps_mutex);
319
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
322 }
323
324 static void
325 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326 {
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
331 bool pll_enabled;
332 uint32_t DP;
333
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
337 return;
338
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
341
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
344 */
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
349
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
354
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357 /*
358 * The DPLL for the pipe must be enabled for this to work.
359 * So enable it temporarily if it's not already enabled.
360 */
361 if (!pll_enabled)
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
365 /*
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368 * to make the power sequencer lock onto the port.
369 * Otherwise even the VDD force bit won't work.
370 */
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
379
380 if (!pll_enabled)
381 vlv_force_pll_off(dev, pipe);
382 }
383
384 static enum pipe
385 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
386 {
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
392 enum pipe pipe;
393
394 lockdep_assert_held(&dev_priv->pps_mutex);
395
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
398
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
401
402 /*
403 * We don't have a power sequencer currently.
404 * Pick one that's not used by other ports.
405 */
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
407 base.head) {
408 struct intel_dp *tmp;
409
410 if (encoder->type != INTEL_OUTPUT_EDP)
411 continue;
412
413 tmp = enc_to_intel_dp(&encoder->base);
414
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
417 }
418
419 /*
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
422 */
423 if (WARN_ON(pipes == 0))
424 pipe = PIPE_A;
425 else
426 pipe = ffs(pipes) - 1;
427
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
430
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
434
435 /* init power sequencer on this pipe and port */
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
438
439 /*
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
442 */
443 vlv_power_sequencer_kick(intel_dp);
444
445 return intel_dp->pps_pipe;
446 }
447
448 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
449 enum pipe pipe);
450
451 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
452 enum pipe pipe)
453 {
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
455 }
456
457 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459 {
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
461 }
462
463 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465 {
466 return true;
467 }
468
469 static enum pipe
470 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471 enum port port,
472 vlv_pipe_check pipe_check)
473 {
474 enum pipe pipe;
475
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
479
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481 continue;
482
483 if (!pipe_check(dev_priv, pipe))
484 continue;
485
486 return pipe;
487 }
488
489 return INVALID_PIPE;
490 }
491
492 static void
493 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
494 {
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 enum port port = intel_dig_port->port;
499
500 lockdep_assert_held(&dev_priv->pps_mutex);
501
502 /* try to find a pipe with this port selected */
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
505 vlv_pipe_has_pp_on);
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_any);
514
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
518 port_name(port));
519 return;
520 }
521
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
524
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
527 }
528
529 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
530 {
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
533
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
535 return;
536
537 /*
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so they
544 * should always be used.
545 */
546
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
549
550 if (encoder->type != INTEL_OUTPUT_EDP)
551 continue;
552
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
555 }
556 }
557
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559 {
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
564 else
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566 }
567
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569 {
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
574 else
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576 }
577
578 /* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
579 This function is only applicable when the panel PM state is not to be tracked. */
580 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
581 void *unused)
582 {
583 struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
584 edp_notifier);
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
587 u32 pp_div;
588 u32 pp_ctrl_reg, pp_div_reg;
589
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
591 return 0;
592
593 pps_lock(intel_dp);
594
595 if (IS_VALLEYVIEW(dev)) {
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
597
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
602
603 /* Writing 0x1F to PP_DIV_REG sets the max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
607 }
608
609 pps_unlock(intel_dp);
610
611 return 0;
612 }
613
614 static bool edp_have_panel_power(struct intel_dp *intel_dp)
615 {
616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
617 struct drm_i915_private *dev_priv = dev->dev_private;
618
619 lockdep_assert_held(&dev_priv->pps_mutex);
620
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
626 }
627
628 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
629 {
630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
631 struct drm_i915_private *dev_priv = dev->dev_private;
632
633 lockdep_assert_held(&dev_priv->pps_mutex);
634
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
637 return false;
638
639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
640 }
641
642 static void
643 intel_dp_check_edp(struct intel_dp *intel_dp)
644 {
645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
646 struct drm_i915_private *dev_priv = dev->dev_private;
647
648 if (!is_edp(intel_dp))
649 return;
650
651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
656 }
657 }
658
659 static uint32_t
660 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
661 {
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
666 uint32_t status;
667 bool done;
668
669 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
670 if (has_aux_irq)
671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
672 msecs_to_jiffies_timeout(10));
673 else
674 done = wait_for_atomic(C, 10) == 0;
675 if (!done)
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
677 has_aux_irq);
678 #undef C
679
680 return status;
681 }
682
683 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
684 {
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
687
688 /*
689 * The clock divider is based on the hrawclk, and we would like the AUX
690 * clock to run at 2MHz. So, take the hrawclk value and divide by 2.
691 */
692 return index ? 0 : intel_hrawclk(dev) / 2;
693 }
694
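/*
 * Editor's note: with the 200MHz hrawclk of an 800MHz FSB, for example,
 * the divider above is 200 / 2 == 100, and 200MHz / 100 gives the desired
 * 2MHz AUX clock.
 */
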
695 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696 {
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
699
700 if (index)
701 return 0;
702
703 if (intel_dig_port->port == PORT_A) {
704 if (IS_GEN6(dev) || IS_GEN7(dev))
705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
706 else
707 return 225; /* eDP input clock at 450Mhz */
708 } else {
709 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
710 }
711 }
712
713 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
714 {
715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 struct drm_device *dev = intel_dig_port->base.base.dev;
717 struct drm_i915_private *dev_priv = dev->dev_private;
718
719 if (intel_dig_port->port == PORT_A) {
720 if (index)
721 return 0;
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
723 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
724 /* Workaround for non-ULT HSW */
725 switch (index) {
726 case 0: return 63;
727 case 1: return 72;
728 default: return 0;
729 }
730 } else {
731 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
732 }
733 }
734
735 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
736 {
737 return index ? 0 : 100;
738 }
739
740 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741 {
742 /*
743 * SKL doesn't need us to program the AUX clock divider (the hardware
744 * derives the clock from CDCLK automatically). We still implement the
745 * get_aux_clock_divider vfunc to plug into the existing code.
746 */
747 return index ? 0 : 1;
748 }
749
750 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
751 bool has_aux_irq,
752 int send_bytes,
753 uint32_t aux_clock_divider)
754 {
755 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
756 struct drm_device *dev = intel_dig_port->base.base.dev;
757 uint32_t precharge, timeout;
758
759 if (IS_GEN6(dev))
760 precharge = 3;
761 else
762 precharge = 5;
763
764 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
765 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
766 else
767 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
768
769 return DP_AUX_CH_CTL_SEND_BUSY |
770 DP_AUX_CH_CTL_DONE |
771 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
772 DP_AUX_CH_CTL_TIME_OUT_ERROR |
773 timeout |
774 DP_AUX_CH_CTL_RECEIVE_ERROR |
775 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
776 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
778 }
779
780 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
781 bool has_aux_irq,
782 int send_bytes,
783 uint32_t unused)
784 {
785 return DP_AUX_CH_CTL_SEND_BUSY |
786 DP_AUX_CH_CTL_DONE |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
793 }
794
795 static int
796 intel_dp_aux_ch(struct intel_dp *intel_dp,
797 const uint8_t *send, int send_bytes,
798 uint8_t *recv, int recv_size)
799 {
800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
801 struct drm_device *dev = intel_dig_port->base.base.dev;
802 struct drm_i915_private *dev_priv = dev->dev_private;
803 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
804 uint32_t ch_data = ch_ctl + 4;
805 uint32_t aux_clock_divider;
806 int i, ret, recv_bytes;
807 uint32_t status;
808 int try, clock = 0;
809 bool has_aux_irq = HAS_AUX_IRQ(dev);
810 bool vdd;
811
812 pps_lock(intel_dp);
813
814 /*
815 * We will be called with VDD already enabled for dpcd/edid/oui reads.
816 * In such cases we want to leave VDD enabled, and it's up to the upper
817 * layers to turn it off. But for e.g. i2c-dev access we need to turn it
818 * on/off ourselves.
819 */
820 vdd = edp_panel_vdd_on(intel_dp);
821
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
824 * deep sleep states.
825 */
826 pm_qos_update_request(&dev_priv->pm_qos, 0);
827
828 intel_dp_check_edp(intel_dp);
829
830 intel_aux_display_runtime_get(dev_priv);
831
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
834 status = I915_READ_NOTRACE(ch_ctl);
835 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
836 break;
837 msleep(1);
838 }
839
840 if (try == 3) {
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
842 I915_READ(ch_ctl));
843 ret = -EBUSY;
844 goto out;
845 }
846
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
849 ret = -E2BIG;
850 goto out;
851 }
852
853 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
854 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
855 has_aux_irq,
856 send_bytes,
857 aux_clock_divider);
858
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i = 0; i < send_bytes; i += 4)
863 I915_WRITE(ch_data + i,
864 intel_dp_pack_aux(send + i,
865 send_bytes - i));
866
867 /* Send the command and wait for it to complete */
868 I915_WRITE(ch_ctl, send_ctl);
869
870 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
871
872 /* Clear done status and any errors */
873 I915_WRITE(ch_ctl,
874 status |
875 DP_AUX_CH_CTL_DONE |
876 DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR);
878
879 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR))
881 continue;
882 if (status & DP_AUX_CH_CTL_DONE)
883 break;
884 }
885 if (status & DP_AUX_CH_CTL_DONE)
886 break;
887 }
888
889 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
890 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
891 ret = -EBUSY;
892 goto out;
893 }
894
895 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected
897 */
898 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
899 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
900 ret = -EIO;
901 goto out;
902 }
903
904 /* Timeouts occur when the device isn't connected, so they're
905 * "normal" -- don't fill the kernel log with these */
906 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
907 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
908 ret = -ETIMEDOUT;
909 goto out;
910 }
911
912 /* Unload any bytes sent back from the other side */
913 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
914 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
915 if (recv_bytes > recv_size)
916 recv_bytes = recv_size;
917
918 for (i = 0; i < recv_bytes; i += 4)
919 intel_dp_unpack_aux(I915_READ(ch_data + i),
920 recv + i, recv_bytes - i);
921
922 ret = recv_bytes;
923 out:
924 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
925 intel_aux_display_runtime_put(dev_priv);
926
927 if (vdd)
928 edp_panel_vdd_off(intel_dp, false);
929
930 pps_unlock(intel_dp);
931
932 return ret;
933 }
934
935 #define BARE_ADDRESS_SIZE 3
936 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
937 static ssize_t
938 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
939 {
940 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
941 uint8_t txbuf[20], rxbuf[20];
942 size_t txsize, rxsize;
943 int ret;
944
945 txbuf[0] = (msg->request << 4) |
946 ((msg->address >> 16) & 0xf);
947 txbuf[1] = (msg->address >> 8) & 0xff;
948 txbuf[2] = msg->address & 0xff;
949 txbuf[3] = msg->size - 1;
950
951 switch (msg->request & ~DP_AUX_I2C_MOT) {
952 case DP_AUX_NATIVE_WRITE:
953 case DP_AUX_I2C_WRITE:
954 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
955 rxsize = 2; /* 0 or 1 data bytes */
956
957 if (WARN_ON(txsize > 20))
958 return -E2BIG;
959
960 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
961
962 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
963 if (ret > 0) {
964 msg->reply = rxbuf[0] >> 4;
965
966 if (ret > 1) {
967 /* Number of bytes written in a short write. */
968 ret = clamp_t(int, rxbuf[1], 0, msg->size);
969 } else {
970 /* Return payload size. */
971 ret = msg->size;
972 }
973 }
974 break;
975
976 case DP_AUX_NATIVE_READ:
977 case DP_AUX_I2C_READ:
978 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
979 rxsize = msg->size + 1;
980
981 if (WARN_ON(rxsize > 20))
982 return -E2BIG;
983
984 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
985 if (ret > 0) {
986 msg->reply = rxbuf[0] >> 4;
987 /*
988 * Assume happy day, and copy the data. The caller is
989 * expected to check msg->reply before touching it.
990 *
991 * Return payload size.
992 */
993 ret--;
994 memcpy(msg->buffer, rxbuf + 1, ret);
995 }
996 break;
997
998 default:
999 ret = -EINVAL;
1000 break;
1001 }
1002
1003 return ret;
1004 }
1005
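/*
 * Editor's worked example of the header built above: a one-byte native read
 * of DPCD address 0x000 (DP_DPCD_REV) has msg->request == DP_AUX_NATIVE_READ
 * (0x9), so txbuf[] starts { 0x90, 0x00, 0x00, 0x00 } - the request in the
 * high nibble of byte 0, the 20-bit address in the following nibbles, and
 * msg->size - 1 in byte 3.
 */
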
1006 static void
1007 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1008 {
1009 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1010 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1011 enum port port = intel_dig_port->port;
1012 const char *name = NULL;
1013 int ret;
1014
1015 switch (port) {
1016 case PORT_A:
1017 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1018 name = "DPDDC-A";
1019 break;
1020 case PORT_B:
1021 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1022 name = "DPDDC-B";
1023 break;
1024 case PORT_C:
1025 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1026 name = "DPDDC-C";
1027 break;
1028 case PORT_D:
1029 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1030 name = "DPDDC-D";
1031 break;
1032 default:
1033 BUG();
1034 }
1035
1036 /*
1037 * The AUX_CTL register is usually DP_CTL + 0x10.
1038 *
1039 * On Haswell and Broadwell though:
1040 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1041 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1042 *
1043 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1044 */
1045 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1046 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1047
1048 intel_dp->aux.name = name;
1049 intel_dp->aux.dev = dev->dev;
1050 intel_dp->aux.transfer = intel_dp_aux_transfer;
1051
1052 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1053 connector->base.kdev->kobj.name);
1054
1055 ret = drm_dp_aux_register(&intel_dp->aux);
1056 if (ret < 0) {
1057 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1058 name, ret);
1059 return;
1060 }
1061
1062 ret = sysfs_create_link(&connector->base.kdev->kobj,
1063 &intel_dp->aux.ddc.dev.kobj,
1064 intel_dp->aux.ddc.dev.kobj.name);
1065 if (ret < 0) {
1066 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1067 drm_dp_aux_unregister(&intel_dp->aux);
1068 }
1069 }
1070
1071 static void
1072 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1073 {
1074 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1075
1076 if (!intel_connector->mst_port)
1077 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1078 intel_dp->aux.ddc.dev.kobj.name);
1079 intel_connector_unregister(intel_connector);
1080 }
1081
1082 static void
1083 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1084 {
1085 u32 ctrl1;
1086
1087 pipe_config->ddi_pll_sel = SKL_DPLL0;
1088 pipe_config->dpll_hw_state.cfgcr1 = 0;
1089 pipe_config->dpll_hw_state.cfgcr2 = 0;
1090
1091 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1092 switch (link_clock / 2) {
1093 case 81000:
1094 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1095 SKL_DPLL0);
1096 break;
1097 case 135000:
1098 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1099 SKL_DPLL0);
1100 break;
1101 case 270000:
1102 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1103 SKL_DPLL0);
1104 break;
1105 case 162000:
1106 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1107 SKL_DPLL0);
1108 break;
1109 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1110 results in a CDCLK change. We need to handle the CDCLK change by
1111 disabling the pipes and re-enabling them. */
1112 case 108000:
1113 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1114 SKL_DPLL0);
1115 break;
1116 case 216000:
1117 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1118 SKL_DPLL0);
1119 break;
1120
1121 }
1122 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1123 }
1124
1125 static void
1126 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1127 {
1128 switch (link_bw) {
1129 case DP_LINK_BW_1_62:
1130 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1131 break;
1132 case DP_LINK_BW_2_7:
1133 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1134 break;
1135 case DP_LINK_BW_5_4:
1136 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1137 break;
1138 }
1139 }
1140
1141 static int
1142 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1143 {
1144 if (intel_dp->num_sink_rates) {
1145 *sink_rates = intel_dp->sink_rates;
1146 return intel_dp->num_sink_rates;
1147 }
1148
1149 *sink_rates = default_rates;
1150
1151 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1152 }
1153
1154 static int
1155 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1156 {
1157 if (INTEL_INFO(dev)->gen >= 9) {
1158 *source_rates = gen9_rates;
1159 return ARRAY_SIZE(gen9_rates);
1160 } else if (IS_CHERRYVIEW(dev)) {
1161 *source_rates = chv_rates;
1162 return ARRAY_SIZE(chv_rates);
1163 }
1164
1165 *source_rates = default_rates;
1166
1167 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1168 /* WaDisableHBR2:skl */
1169 return (DP_LINK_BW_2_7 >> 3) + 1;
1170 else if (INTEL_INFO(dev)->gen >= 8 ||
1171 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1172 return (DP_LINK_BW_5_4 >> 3) + 1;
1173 else
1174 return (DP_LINK_BW_2_7 >> 3) + 1;
1175 }
1176
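/*
 * Editor's note on the shift trick above: the DPCD link-bw codes are
 * DP_LINK_BW_1_62 == 0x06, DP_LINK_BW_2_7 == 0x0a and DP_LINK_BW_5_4 == 0x14,
 * so (code >> 3) maps them to 0/1/2 and (code >> 3) + 1 is the number of
 * usable entries in default_rates[].
 */
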
1177 static void
1178 intel_dp_set_clock(struct intel_encoder *encoder,
1179 struct intel_crtc_state *pipe_config, int link_bw)
1180 {
1181 struct drm_device *dev = encoder->base.dev;
1182 const struct dp_link_dpll *divisor = NULL;
1183 int i, count = 0;
1184
1185 if (IS_G4X(dev)) {
1186 divisor = gen4_dpll;
1187 count = ARRAY_SIZE(gen4_dpll);
1188 } else if (HAS_PCH_SPLIT(dev)) {
1189 divisor = pch_dpll;
1190 count = ARRAY_SIZE(pch_dpll);
1191 } else if (IS_CHERRYVIEW(dev)) {
1192 divisor = chv_dpll;
1193 count = ARRAY_SIZE(chv_dpll);
1194 } else if (IS_VALLEYVIEW(dev)) {
1195 divisor = vlv_dpll;
1196 count = ARRAY_SIZE(vlv_dpll);
1197 }
1198
1199 if (divisor && count) {
1200 for (i = 0; i < count; i++) {
1201 if (link_bw == divisor[i].link_bw) {
1202 pipe_config->dpll = divisor[i].dpll;
1203 pipe_config->clock_set = true;
1204 break;
1205 }
1206 }
1207 }
1208 }
1209
1210 static int intersect_rates(const int *source_rates, int source_len,
1211 const int *sink_rates, int sink_len,
1212 int *common_rates)
1213 {
1214 int i = 0, j = 0, k = 0;
1215
1216 while (i < source_len && j < sink_len) {
1217 if (source_rates[i] == sink_rates[j]) {
1218 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1219 return k;
1220 common_rates[k] = source_rates[i];
1221 ++k;
1222 ++i;
1223 ++j;
1224 } else if (source_rates[i] < sink_rates[j]) {
1225 ++i;
1226 } else {
1227 ++j;
1228 }
1229 }
1230 return k;
1231 }
1232
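/*
 * Editor's sketch: intersect_rates() is a two-pointer merge and relies on
 * both arrays being sorted ascending. E.g. intersecting default_rates[]
 * with a hypothetical sink list of { 162000, 270000 } fills common_rates[]
 * with those two rates and returns 2.
 */
static inline int example_intersect(int *common_rates)
{
	static const int sink[] = { 162000, 270000 };

	return intersect_rates(default_rates, ARRAY_SIZE(default_rates),
			       sink, ARRAY_SIZE(sink), common_rates);
}
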
1233 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1234 int *common_rates)
1235 {
1236 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1237 const int *source_rates, *sink_rates;
1238 int source_len, sink_len;
1239
1240 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1241 source_len = intel_dp_source_rates(dev, &source_rates);
1242
1243 return intersect_rates(source_rates, source_len,
1244 sink_rates, sink_len,
1245 common_rates);
1246 }
1247
1248 static void snprintf_int_array(char *str, size_t len,
1249 const int *array, int nelem)
1250 {
1251 int i;
1252
1253 str[0] = '\0';
1254
1255 for (i = 0; i < nelem; i++) {
1256 int r = snprintf(str, len, "%d,", array[i]);
1257 if (r >= len)
1258 return;
1259 str += r;
1260 len -= r;
1261 }
1262 }
1263
1264 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1265 {
1266 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1267 const int *source_rates, *sink_rates;
1268 int source_len, sink_len, common_len;
1269 int common_rates[DP_MAX_SUPPORTED_RATES];
1270 char str[128]; /* FIXME: too big for stack? */
1271
1272 if ((drm_debug & DRM_UT_KMS) == 0)
1273 return;
1274
1275 source_len = intel_dp_source_rates(dev, &source_rates);
1276 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1277 DRM_DEBUG_KMS("source rates: %s\n", str);
1278
1279 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1280 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1281 DRM_DEBUG_KMS("sink rates: %s\n", str);
1282
1283 common_len = intel_dp_common_rates(intel_dp, common_rates);
1284 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1285 DRM_DEBUG_KMS("common rates: %s\n", str);
1286 }
1287
1288 static int rate_to_index(int find, const int *rates)
1289 {
1290 int i = 0;
1291
1292 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1293 if (find == rates[i])
1294 break;
1295
1296 return i;
1297 }
1298
1299 int
1300 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1301 {
1302 int rates[DP_MAX_SUPPORTED_RATES] = {};
1303 int len;
1304
1305 len = intel_dp_common_rates(intel_dp, rates);
1306 if (WARN_ON(len <= 0))
1307 return 162000;
1308
1309 return rates[rate_to_index(0, rates) - 1];
1310 }
1311
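/*
 * Editor's note: rates[] above is zero-initialized, so rate_to_index(0, rates)
 * returns the index of the first unused slot, i.e. the number of valid
 * entries; since the common rates are sorted ascending, the entry just
 * before it is the maximum link rate.
 */
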
1312 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1313 {
1314 return rate_to_index(rate, intel_dp->sink_rates);
1315 }
1316
1317 bool
1318 intel_dp_compute_config(struct intel_encoder *encoder,
1319 struct intel_crtc_state *pipe_config)
1320 {
1321 struct drm_device *dev = encoder->base.dev;
1322 struct drm_i915_private *dev_priv = dev->dev_private;
1323 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1324 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1325 enum port port = dp_to_dig_port(intel_dp)->port;
1326 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1327 struct intel_connector *intel_connector = intel_dp->attached_connector;
1328 int lane_count, clock;
1329 int min_lane_count = 1;
1330 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1331 /* Conveniently, the link BW constants become indices with a shift... */
1332 int min_clock = 0;
1333 int max_clock;
1334 int bpp, mode_rate;
1335 int link_avail, link_clock;
1336 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1337 int common_len;
1338
1339 common_len = intel_dp_common_rates(intel_dp, common_rates);
1340
1341 /* No common link rates between source and sink */
1342 WARN_ON(common_len <= 0);
1343
1344 max_clock = common_len - 1;
1345
1346 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1347 pipe_config->has_pch_encoder = true;
1348
1349 pipe_config->has_dp_encoder = true;
1350 pipe_config->has_drrs = false;
1351 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1352
1353 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1354 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1355 adjusted_mode);
1356 if (!HAS_PCH_SPLIT(dev))
1357 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1358 intel_connector->panel.fitting_mode);
1359 else
1360 intel_pch_panel_fitting(intel_crtc, pipe_config,
1361 intel_connector->panel.fitting_mode);
1362 }
1363
1364 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1365 return false;
1366
1367 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1368 "max bw %d pixel clock %iKHz\n",
1369 max_lane_count, common_rates[max_clock],
1370 adjusted_mode->crtc_clock);
1371
1372 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1373 * bpc in between. */
1374 bpp = pipe_config->pipe_bpp;
1375 if (is_edp(intel_dp)) {
1376 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1377 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1378 dev_priv->vbt.edp_bpp);
1379 bpp = dev_priv->vbt.edp_bpp;
1380 }
1381
1382 /*
1383 * Use the maximum clock and number of lanes the eDP panel
1384 * advertises being capable of. The panels are generally
1385 * designed to support only a single clock and lane
1386 * configuration, and typically these values correspond to the
1387 * native resolution of the panel.
1388 */
1389 min_lane_count = max_lane_count;
1390 min_clock = max_clock;
1391 }
1392
1393 for (; bpp >= 6*3; bpp -= 2*3) {
1394 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1395 bpp);
1396
1397 for (clock = min_clock; clock <= max_clock; clock++) {
1398 for (lane_count = min_lane_count;
1399 lane_count <= max_lane_count;
1400 lane_count <<= 1) {
1401
1402 link_clock = common_rates[clock];
1403 link_avail = intel_dp_max_data_rate(link_clock,
1404 lane_count);
1405
1406 if (mode_rate <= link_avail) {
1407 goto found;
1408 }
1409 }
1410 }
1411 }
1412
1413 return false;
1414
1415 found:
1416 if (intel_dp->color_range_auto) {
1417 /*
1418 * See:
1419 * CEA-861-E - 5.1 Default Encoding Parameters
1420 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1421 */
1422 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1423 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1424 else
1425 intel_dp->color_range = 0;
1426 }
1427
1428 if (intel_dp->color_range)
1429 pipe_config->limited_color_range = true;
1430
1431 intel_dp->lane_count = lane_count;
1432
1433 if (intel_dp->num_sink_rates) {
1434 intel_dp->link_bw = 0;
1435 intel_dp->rate_select =
1436 intel_dp_rate_select(intel_dp, common_rates[clock]);
1437 } else {
1438 intel_dp->link_bw =
1439 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1440 intel_dp->rate_select = 0;
1441 }
1442
1443 pipe_config->pipe_bpp = bpp;
1444 pipe_config->port_clock = common_rates[clock];
1445
1446 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1447 intel_dp->link_bw, intel_dp->lane_count,
1448 pipe_config->port_clock, bpp);
1449 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1450 mode_rate, link_avail);
1451
1452 intel_link_compute_m_n(bpp, lane_count,
1453 adjusted_mode->crtc_clock,
1454 pipe_config->port_clock,
1455 &pipe_config->dp_m_n);
1456
1457 if (intel_connector->panel.downclock_mode != NULL &&
1458 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1459 pipe_config->has_drrs = true;
1460 intel_link_compute_m_n(bpp, lane_count,
1461 intel_connector->panel.downclock_mode->clock,
1462 pipe_config->port_clock,
1463 &pipe_config->dp_m2_n2);
1464 }
1465
1466 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1467 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1468 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1469 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1470 else
1471 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1472
1473 return true;
1474 }
1475
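/*
 * Editor's worked example of the search above: a 1920x1080@60 mode
 * (crtc_clock == 148500) at bpp == 24 needs
 * intel_dp_link_required(148500, 24) == 356400, which exceeds the 162000
 * link at one lane (129600) and two lanes (259200) but fits at four lanes
 * (518400), so the loop settles on the lowest clock/width combination that
 * carries the mode.
 */
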
1476 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1477 {
1478 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1479 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1480 struct drm_device *dev = crtc->base.dev;
1481 struct drm_i915_private *dev_priv = dev->dev_private;
1482 u32 dpa_ctl;
1483
1484 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1485 crtc->config->port_clock);
1486 dpa_ctl = I915_READ(DP_A);
1487 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1488
1489 if (crtc->config->port_clock == 162000) {
1490 /* For a long time we've carried around an ILK-DevA w/a for the
1491 * 160MHz clock. If we're really unlucky, it's still required.
1492 */
1493 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1494 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1495 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1496 } else {
1497 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1498 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1499 }
1500
1501 I915_WRITE(DP_A, dpa_ctl);
1502
1503 POSTING_READ(DP_A);
1504 udelay(500);
1505 }
1506
1507 static void intel_dp_prepare(struct intel_encoder *encoder)
1508 {
1509 struct drm_device *dev = encoder->base.dev;
1510 struct drm_i915_private *dev_priv = dev->dev_private;
1511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1512 enum port port = dp_to_dig_port(intel_dp)->port;
1513 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1514 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1515
1516 /*
1517 * There are four kinds of DP registers:
1518 *
1519 * IBX PCH
1520 * SNB CPU
1521 * IVB CPU
1522 * CPT PCH
1523 *
1524 * IBX PCH and CPU are the same for almost everything,
1525 * except that the CPU DP PLL is configured in this
1526 * register
1527 *
1528 * CPT PCH is quite different, having many bits moved
1529 * to the TRANS_DP_CTL register instead. That
1530 * configuration happens (oddly) in ironlake_pch_enable
1531 */
1532
1533 /* Preserve the BIOS-computed detected bit. This is
1534 * supposed to be read-only.
1535 */
1536 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1537
1538 /* Handle DP bits in common between all three register formats */
1539 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1540 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1541
1542 if (crtc->config->has_audio)
1543 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1544
1545 /* Split out the IBX/CPU vs CPT settings */
1546
1547 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1548 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1549 intel_dp->DP |= DP_SYNC_HS_HIGH;
1550 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1551 intel_dp->DP |= DP_SYNC_VS_HIGH;
1552 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1553
1554 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1555 intel_dp->DP |= DP_ENHANCED_FRAMING;
1556
1557 intel_dp->DP |= crtc->pipe << 29;
1558 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1559 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1560 intel_dp->DP |= intel_dp->color_range;
1561
1562 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1563 intel_dp->DP |= DP_SYNC_HS_HIGH;
1564 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1565 intel_dp->DP |= DP_SYNC_VS_HIGH;
1566 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1567
1568 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1569 intel_dp->DP |= DP_ENHANCED_FRAMING;
1570
1571 if (!IS_CHERRYVIEW(dev)) {
1572 if (crtc->pipe == 1)
1573 intel_dp->DP |= DP_PIPEB_SELECT;
1574 } else {
1575 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1576 }
1577 } else {
1578 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1579 }
1580 }
1581
1582 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1583 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1584
1585 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1586 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1587
1588 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1589 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1590
1591 static void wait_panel_status(struct intel_dp *intel_dp,
1592 u32 mask,
1593 u32 value)
1594 {
1595 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1597 u32 pp_stat_reg, pp_ctrl_reg;
1598
1599 lockdep_assert_held(&dev_priv->pps_mutex);
1600
1601 pp_stat_reg = _pp_stat_reg(intel_dp);
1602 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1603
1604 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1605 mask, value,
1606 I915_READ(pp_stat_reg),
1607 I915_READ(pp_ctrl_reg));
1608
1609 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1610 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1611 I915_READ(pp_stat_reg),
1612 I915_READ(pp_ctrl_reg));
1613 }
1614
1615 DRM_DEBUG_KMS("Wait complete\n");
1616 }
1617
1618 static void wait_panel_on(struct intel_dp *intel_dp)
1619 {
1620 DRM_DEBUG_KMS("Wait for panel power on\n");
1621 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1622 }
1623
1624 static void wait_panel_off(struct intel_dp *intel_dp)
1625 {
1626 DRM_DEBUG_KMS("Wait for panel power off time\n");
1627 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1628 }
1629
1630 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1631 {
1632 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1633
1634 /* When the VDD override bit is the last thing we disable, we have to
1635 * do the manual wait. */
1636 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1637 intel_dp->panel_power_cycle_delay);
1638
1639 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1640 }
1641
1642 static void wait_backlight_on(struct intel_dp *intel_dp)
1643 {
1644 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1645 intel_dp->backlight_on_delay);
1646 }
1647
1648 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1649 {
1650 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1651 intel_dp->backlight_off_delay);
1652 }
1653
1654 /* Read the current pp_control value, unlocking the register if it
1655 * is locked
1656 */
1657
1658 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1659 {
1660 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1662 u32 control;
1663
1664 lockdep_assert_held(&dev_priv->pps_mutex);
1665
1666 control = I915_READ(_pp_ctrl_reg(intel_dp));
1667 control &= ~PANEL_UNLOCK_MASK;
1668 control |= PANEL_UNLOCK_REGS;
1669 return control;
1670 }
1671
1672 /*
1673 * Must be paired with edp_panel_vdd_off().
1674 * Must hold pps_mutex around the whole on/off sequence.
1675 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676 */
1677 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1678 {
1679 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1680 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1681 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1682 struct drm_i915_private *dev_priv = dev->dev_private;
1683 enum intel_display_power_domain power_domain;
1684 u32 pp;
1685 u32 pp_stat_reg, pp_ctrl_reg;
1686 bool need_to_disable = !intel_dp->want_panel_vdd;
1687
1688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
1690 if (!is_edp(intel_dp))
1691 return false;
1692
1693 cancel_delayed_work(&intel_dp->panel_vdd_work);
1694 intel_dp->want_panel_vdd = true;
1695
1696 if (edp_have_panel_vdd(intel_dp))
1697 return need_to_disable;
1698
1699 power_domain = intel_display_port_power_domain(intel_encoder);
1700 intel_display_power_get(dev_priv, power_domain);
1701
1702 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1703 port_name(intel_dig_port->port));
1704
1705 if (!edp_have_panel_power(intel_dp))
1706 wait_panel_power_cycle(intel_dp);
1707
1708 pp = ironlake_get_pp_control(intel_dp);
1709 pp |= EDP_FORCE_VDD;
1710
1711 pp_stat_reg = _pp_stat_reg(intel_dp);
1712 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1713
1714 I915_WRITE(pp_ctrl_reg, pp);
1715 POSTING_READ(pp_ctrl_reg);
1716 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1717 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1718 /*
1719 * If the panel wasn't on, delay before accessing aux channel
1720 */
1721 if (!edp_have_panel_power(intel_dp)) {
1722 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1723 port_name(intel_dig_port->port));
1724 msleep(intel_dp->panel_power_up_delay);
1725 }
1726
1727 return need_to_disable;
1728 }
1729
1730 /*
1731 * Must be paired with intel_edp_panel_vdd_off() or
1732 * intel_edp_panel_off().
1733 * Nested calls to these functions are not allowed since
1734 * we drop the lock. Caller must use some higher level
1735 * locking to prevent nested calls from other threads.
1736 */
1737 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1738 {
1739 bool vdd;
1740
1741 if (!is_edp(intel_dp))
1742 return;
1743
1744 pps_lock(intel_dp);
1745 vdd = edp_panel_vdd_on(intel_dp);
1746 pps_unlock(intel_dp);
1747
1748 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1749 port_name(dp_to_dig_port(intel_dp)->port));
1750 }
1751
1752 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1753 {
1754 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1755 struct drm_i915_private *dev_priv = dev->dev_private;
1756 struct intel_digital_port *intel_dig_port =
1757 dp_to_dig_port(intel_dp);
1758 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1759 enum intel_display_power_domain power_domain;
1760 u32 pp;
1761 u32 pp_stat_reg, pp_ctrl_reg;
1762
1763 lockdep_assert_held(&dev_priv->pps_mutex);
1764
1765 WARN_ON(intel_dp->want_panel_vdd);
1766
1767 if (!edp_have_panel_vdd(intel_dp))
1768 return;
1769
1770 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1771 port_name(intel_dig_port->port));
1772
1773 pp = ironlake_get_pp_control(intel_dp);
1774 pp &= ~EDP_FORCE_VDD;
1775
1776 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1777 pp_stat_reg = _pp_stat_reg(intel_dp);
1778
1779 I915_WRITE(pp_ctrl_reg, pp);
1780 POSTING_READ(pp_ctrl_reg);
1781
1782 /* Make sure sequencer is idle before allowing subsequent activity */
1783 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1784 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1785
1786 if ((pp & POWER_TARGET_ON) == 0)
1787 intel_dp->last_power_cycle = jiffies;
1788
1789 power_domain = intel_display_port_power_domain(intel_encoder);
1790 intel_display_power_put(dev_priv, power_domain);
1791 }
1792
1793 static void edp_panel_vdd_work(struct work_struct *__work)
1794 {
1795 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1796 struct intel_dp, panel_vdd_work);
1797
1798 pps_lock(intel_dp);
1799 if (!intel_dp->want_panel_vdd)
1800 edp_panel_vdd_off_sync(intel_dp);
1801 pps_unlock(intel_dp);
1802 }
1803
1804 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805 {
1806 unsigned long delay;
1807
1808 /*
1809 * Queue the timer to fire a long time from now (relative to the power
1810 * down delay) to keep the panel power up across a sequence of
1811 * operations.
1812 */
1813 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1814 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1815 }
1816
1817 /*
1818 * Must be paired with edp_panel_vdd_on().
1819 * Must hold pps_mutex around the whole on/off sequence.
1820 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821 */
1822 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1823 {
1824 struct drm_i915_private *dev_priv =
1825 intel_dp_to_dev(intel_dp)->dev_private;
1826
1827 lockdep_assert_held(&dev_priv->pps_mutex);
1828
1829 if (!is_edp(intel_dp))
1830 return;
1831
1832 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1833 port_name(dp_to_dig_port(intel_dp)->port));
1834
1835 intel_dp->want_panel_vdd = false;
1836
1837 if (sync)
1838 edp_panel_vdd_off_sync(intel_dp);
1839 else
1840 edp_panel_vdd_schedule_off(intel_dp);
1841 }
1842
1843 static void edp_panel_on(struct intel_dp *intel_dp)
1844 {
1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1846 struct drm_i915_private *dev_priv = dev->dev_private;
1847 u32 pp;
1848 u32 pp_ctrl_reg;
1849
1850 lockdep_assert_held(&dev_priv->pps_mutex);
1851
1852 if (!is_edp(intel_dp))
1853 return;
1854
1855 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1856 port_name(dp_to_dig_port(intel_dp)->port));
1857
1858 if (WARN(edp_have_panel_power(intel_dp),
1859 "eDP port %c panel power already on\n",
1860 port_name(dp_to_dig_port(intel_dp)->port)))
1861 return;
1862
1863 wait_panel_power_cycle(intel_dp);
1864
1865 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1866 pp = ironlake_get_pp_control(intel_dp);
1867 if (IS_GEN5(dev)) {
1868 /* ILK workaround: disable reset around power sequence */
1869 pp &= ~PANEL_POWER_RESET;
1870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
1872 }
1873
1874 pp |= POWER_TARGET_ON;
1875 if (!IS_GEN5(dev))
1876 pp |= PANEL_POWER_RESET;
1877
1878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
1880
1881 wait_panel_on(intel_dp);
1882 intel_dp->last_power_on = jiffies;
1883
1884 if (IS_GEN5(dev)) {
1885 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1886 I915_WRITE(pp_ctrl_reg, pp);
1887 POSTING_READ(pp_ctrl_reg);
1888 }
1889 }
1890
1891 void intel_edp_panel_on(struct intel_dp *intel_dp)
1892 {
1893 if (!is_edp(intel_dp))
1894 return;
1895
1896 pps_lock(intel_dp);
1897 edp_panel_on(intel_dp);
1898 pps_unlock(intel_dp);
1899 }
1900
1901
1902 static void edp_panel_off(struct intel_dp *intel_dp)
1903 {
1904 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1905 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1906 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1907 struct drm_i915_private *dev_priv = dev->dev_private;
1908 enum intel_display_power_domain power_domain;
1909 u32 pp;
1910 u32 pp_ctrl_reg;
1911
1912 lockdep_assert_held(&dev_priv->pps_mutex);
1913
1914 if (!is_edp(intel_dp))
1915 return;
1916
1917 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1918 port_name(dp_to_dig_port(intel_dp)->port));
1919
1920 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1921 port_name(dp_to_dig_port(intel_dp)->port));
1922
1923 pp = ironlake_get_pp_control(intel_dp);
1924 /* We need to switch off panel power _and_ force vdd, because otherwise
1925 * some panels get very unhappy and cease to work. */
1926 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1927 EDP_BLC_ENABLE);
1928
1929 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1930
1931 intel_dp->want_panel_vdd = false;
1932
1933 I915_WRITE(pp_ctrl_reg, pp);
1934 POSTING_READ(pp_ctrl_reg);
1935
1936 intel_dp->last_power_cycle = jiffies;
1937 wait_panel_off(intel_dp);
1938
1939 /* We got a reference when we enabled the VDD. */
1940 power_domain = intel_display_port_power_domain(intel_encoder);
1941 intel_display_power_put(dev_priv, power_domain);
1942 }
1943
1944 void intel_edp_panel_off(struct intel_dp *intel_dp)
1945 {
1946 if (!is_edp(intel_dp))
1947 return;
1948
1949 pps_lock(intel_dp);
1950 edp_panel_off(intel_dp);
1951 pps_unlock(intel_dp);
1952 }
1953
1954 /* Enable backlight in the panel power control. */
1955 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1956 {
1957 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1958 struct drm_device *dev = intel_dig_port->base.base.dev;
1959 struct drm_i915_private *dev_priv = dev->dev_private;
1960 u32 pp;
1961 u32 pp_ctrl_reg;
1962
1963 /*
1964 * If we enable the backlight right away following a panel power
1965 * on, we may see slight flicker as the panel syncs with the eDP
1966 * link. So delay a bit to make sure the image is solid before
1967 * allowing it to appear.
1968 */
1969 wait_backlight_on(intel_dp);
1970
1971 pps_lock(intel_dp);
1972
1973 pp = ironlake_get_pp_control(intel_dp);
1974 pp |= EDP_BLC_ENABLE;
1975
1976 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1977
1978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
1980
1981 pps_unlock(intel_dp);
1982 }
1983
1984 /* Enable backlight PWM and backlight PP control. */
1985 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1986 {
1987 if (!is_edp(intel_dp))
1988 return;
1989
1990 DRM_DEBUG_KMS("\n");
1991
1992 intel_panel_enable_backlight(intel_dp->attached_connector);
1993 _intel_edp_backlight_on(intel_dp);
1994 }
1995
1996 /* Disable backlight in the panel power control. */
1997 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1998 {
1999 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2000 struct drm_i915_private *dev_priv = dev->dev_private;
2001 u32 pp;
2002 u32 pp_ctrl_reg;
2003
2004 if (!is_edp(intel_dp))
2005 return;
2006
2007 pps_lock(intel_dp);
2008
2009 pp = ironlake_get_pp_control(intel_dp);
2010 pp &= ~EDP_BLC_ENABLE;
2011
2012 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2013
2014 I915_WRITE(pp_ctrl_reg, pp);
2015 POSTING_READ(pp_ctrl_reg);
2016
2017 pps_unlock(intel_dp);
2018
2019 intel_dp->last_backlight_off = jiffies;
2020 edp_wait_backlight_off(intel_dp);
2021 }
2022
2023 /* Disable backlight PP control and backlight PWM. */
2024 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2025 {
2026 if (!is_edp(intel_dp))
2027 return;
2028
2029 DRM_DEBUG_KMS("\n");
2030
2031 _intel_edp_backlight_off(intel_dp);
2032 intel_panel_disable_backlight(intel_dp->attached_connector);
2033 }
2034
2035 /*
2036 * Hook for controlling the panel power control backlight through the bl_power
2037 * sysfs attribute. Take care to handle multiple calls.
2038 */
2039 static void intel_edp_backlight_power(struct intel_connector *connector,
2040 bool enable)
2041 {
2042 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2043 bool is_enabled;
2044
2045 pps_lock(intel_dp);
2046 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2047 pps_unlock(intel_dp);
2048
2049 if (is_enabled == enable)
2050 return;
2051
2052 DRM_DEBUG_KMS("panel power control backlight %s\n",
2053 enable ? "enable" : "disable");
2054
2055 if (enable)
2056 _intel_edp_backlight_on(intel_dp);
2057 else
2058 _intel_edp_backlight_off(intel_dp);
2059 }
2060
2061 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2062 {
2063 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2064 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2065 struct drm_device *dev = crtc->dev;
2066 struct drm_i915_private *dev_priv = dev->dev_private;
2067 u32 dpa_ctl;
2068
2069 assert_pipe_disabled(dev_priv,
2070 to_intel_crtc(crtc)->pipe);
2071
2072 DRM_DEBUG_KMS("\n");
2073 dpa_ctl = I915_READ(DP_A);
2074 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2075 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2076
2077 /* We don't adjust intel_dp->DP while tearing down the link, to
2078 * facilitate link retraining (e.g. after hotplug). Hence clear all
2079 * enable bits here to ensure that we don't enable too much. */
2080 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2081 intel_dp->DP |= DP_PLL_ENABLE;
2082 I915_WRITE(DP_A, intel_dp->DP);
2083 POSTING_READ(DP_A);
2084 udelay(200);
2085 }
2086
2087 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2088 {
2089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2090 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2091 struct drm_device *dev = crtc->dev;
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 dpa_ctl;
2094
2095 assert_pipe_disabled(dev_priv,
2096 to_intel_crtc(crtc)->pipe);
2097
2098 dpa_ctl = I915_READ(DP_A);
2099 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2100 "dp pll off, should be on\n");
2101 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102
2103 /* We can't rely on the value tracked for the DP register in
2104 * intel_dp->DP because link_down must not change that (otherwise link
2105 * re-training will fail). */
2106 dpa_ctl &= ~DP_PLL_ENABLE;
2107 I915_WRITE(DP_A, dpa_ctl);
2108 POSTING_READ(DP_A);
2109 udelay(200);
2110 }
2111
2112 /* If the sink supports it, try to set the power state appropriately */
2113 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2114 {
2115 int ret, i;
2116
2117 /* Should have a valid DPCD by this point */
2118 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2119 return;
2120
2121 if (mode != DRM_MODE_DPMS_ON) {
2122 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2123 DP_SET_POWER_D3);
2124 } else {
2125 /*
2126 * When turning on, we need to retry the write a few times, waiting
2127 * 1 ms between attempts, to give the sink time to wake up.
2128 */
2129 for (i = 0; i < 3; i++) {
2130 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2131 DP_SET_POWER_D0);
2132 if (ret == 1)
2133 break;
2134 msleep(1);
2135 }
2136 }
2137
2138 if (ret != 1)
2139 DRM_DEBUG_KMS("failed to %s sink power state\n",
2140 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2141 }
2142
2143 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2144 enum pipe *pipe)
2145 {
2146 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2147 enum port port = dp_to_dig_port(intel_dp)->port;
2148 struct drm_device *dev = encoder->base.dev;
2149 struct drm_i915_private *dev_priv = dev->dev_private;
2150 enum intel_display_power_domain power_domain;
2151 u32 tmp;
2152
2153 power_domain = intel_display_port_power_domain(encoder);
2154 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2155 return false;
2156
2157 tmp = I915_READ(intel_dp->output_reg);
2158
2159 if (!(tmp & DP_PORT_EN))
2160 return false;
2161
2162 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2163 *pipe = PORT_TO_PIPE_CPT(tmp);
2164 } else if (IS_CHERRYVIEW(dev)) {
2165 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2166 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2167 *pipe = PORT_TO_PIPE(tmp);
2168 } else {
2169 u32 trans_sel;
2170 u32 trans_dp;
2171 int i;
2172
2173 switch (intel_dp->output_reg) {
2174 case PCH_DP_B:
2175 trans_sel = TRANS_DP_PORT_SEL_B;
2176 break;
2177 case PCH_DP_C:
2178 trans_sel = TRANS_DP_PORT_SEL_C;
2179 break;
2180 case PCH_DP_D:
2181 trans_sel = TRANS_DP_PORT_SEL_D;
2182 break;
2183 default:
2184 return true;
2185 }
2186
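/*
 * No pipe is encoded in the port register itself on CPT; scan the
 * per-pipe transcoder DP control registers for a matching port select
 * instead.
 */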
2187 for_each_pipe(dev_priv, i) {
2188 trans_dp = I915_READ(TRANS_DP_CTL(i));
2189 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2190 *pipe = i;
2191 return true;
2192 }
2193 }
2194
2195 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2196 intel_dp->output_reg);
2197 }
2198
2199 return true;
2200 }
2201
2202 static void intel_dp_get_config(struct intel_encoder *encoder,
2203 struct intel_crtc_state *pipe_config)
2204 {
2205 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2206 u32 tmp, flags = 0;
2207 struct drm_device *dev = encoder->base.dev;
2208 struct drm_i915_private *dev_priv = dev->dev_private;
2209 enum port port = dp_to_dig_port(intel_dp)->port;
2210 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2211 int dotclock;
2212
2213 tmp = I915_READ(intel_dp->output_reg);
2214
2215 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2216
2217 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2218 if (tmp & DP_SYNC_HS_HIGH)
2219 flags |= DRM_MODE_FLAG_PHSYNC;
2220 else
2221 flags |= DRM_MODE_FLAG_NHSYNC;
2222
2223 if (tmp & DP_SYNC_VS_HIGH)
2224 flags |= DRM_MODE_FLAG_PVSYNC;
2225 else
2226 flags |= DRM_MODE_FLAG_NVSYNC;
2227 } else {
2228 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2229 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2230 flags |= DRM_MODE_FLAG_PHSYNC;
2231 else
2232 flags |= DRM_MODE_FLAG_NHSYNC;
2233
2234 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2235 flags |= DRM_MODE_FLAG_PVSYNC;
2236 else
2237 flags |= DRM_MODE_FLAG_NVSYNC;
2238 }
2239
2240 pipe_config->base.adjusted_mode.flags |= flags;
2241
2242 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2243 tmp & DP_COLOR_RANGE_16_235)
2244 pipe_config->limited_color_range = true;
2245
2246 pipe_config->has_dp_encoder = true;
2247
2248 intel_dp_get_m_n(crtc, pipe_config);
2249
2250 if (port == PORT_A) {
2251 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2252 pipe_config->port_clock = 162000;
2253 else
2254 pipe_config->port_clock = 270000;
2255 }
2256
2257 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2258 &pipe_config->dp_m_n);
2259
2260 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2261 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2262
2263 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2264
2265 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2266 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2267 /*
2268 * This is a big fat ugly hack.
2269 *
2270 * Some machines in UEFI boot mode provide us a VBT that has 18
2271 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2272 * unknown we fail to light up. Yet the same BIOS boots up with
2273 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2274 * max, not what it tells us to use.
2275 *
2276 * Note: This will still be broken if the eDP panel is not lit
2277 * up by the BIOS, and thus we can't get the mode at module
2278 * load.
2279 */
2280 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2281 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2282 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2283 }
2284 }
2285
2286 static void intel_disable_dp(struct intel_encoder *encoder)
2287 {
2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2289 struct drm_device *dev = encoder->base.dev;
2290 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2291
2292 if (crtc->config->has_audio)
2293 intel_audio_codec_disable(encoder);
2294
2295 if (HAS_PSR(dev) && !HAS_DDI(dev))
2296 intel_psr_disable(intel_dp);
2297
2298 /* Make sure the panel is off before trying to change the mode. But also
2299 * ensure that we have vdd while we switch off the panel. */
2300 intel_edp_panel_vdd_on(intel_dp);
2301 intel_edp_backlight_off(intel_dp);
2302 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2303 intel_edp_panel_off(intel_dp);
2304
2305 /* disable the port before the pipe on g4x */
2306 if (INTEL_INFO(dev)->gen < 5)
2307 intel_dp_link_down(intel_dp);
2308 }
2309
2310 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2311 {
2312 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2313 enum port port = dp_to_dig_port(intel_dp)->port;
2314
2315 intel_dp_link_down(intel_dp);
2316 if (port == PORT_A)
2317 ironlake_edp_pll_off(intel_dp);
2318 }
2319
2320 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2321 {
2322 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2323
2324 intel_dp_link_down(intel_dp);
2325 }
2326
2327 static void chv_post_disable_dp(struct intel_encoder *encoder)
2328 {
2329 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2330 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2331 struct drm_device *dev = encoder->base.dev;
2332 struct drm_i915_private *dev_priv = dev->dev_private;
2333 struct intel_crtc *intel_crtc =
2334 to_intel_crtc(encoder->base.crtc);
2335 enum dpio_channel ch = vlv_dport_to_channel(dport);
2336 enum pipe pipe = intel_crtc->pipe;
2337 u32 val;
2338
2339 intel_dp_link_down(intel_dp);
2340
2341 mutex_lock(&dev_priv->dpio_lock);
2342
2343 /* Propagate soft reset to data lane reset */
2344 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2345 val |= CHV_PCS_REQ_SOFTRESET_EN;
2346 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2347
2348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2349 val |= CHV_PCS_REQ_SOFTRESET_EN;
2350 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2351
2352 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2353 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2354 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2355
2356 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2357 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2358 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2359
2360 mutex_unlock(&dev_priv->dpio_lock);
2361 }
2362
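/*
 * A note on the three register layouts handled below: DDI platforms
 * program the training pattern via DP_TP_CTL, CPT-era PCH ports use the
 * _CPT link-train bits in the port register, and everything else
 * (g4x/VLV/CHV) uses the legacy DP_LINK_TRAIN_* bits.
 */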
2363 static void
2364 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2365 uint32_t *DP,
2366 uint8_t dp_train_pat)
2367 {
2368 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2369 struct drm_device *dev = intel_dig_port->base.base.dev;
2370 struct drm_i915_private *dev_priv = dev->dev_private;
2371 enum port port = intel_dig_port->port;
2372
2373 if (HAS_DDI(dev)) {
2374 uint32_t temp = I915_READ(DP_TP_CTL(port));
2375
2376 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2377 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2378 else
2379 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2380
2381 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2382 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2383 case DP_TRAINING_PATTERN_DISABLE:
2384 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2385
2386 break;
2387 case DP_TRAINING_PATTERN_1:
2388 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2389 break;
2390 case DP_TRAINING_PATTERN_2:
2391 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2392 break;
2393 case DP_TRAINING_PATTERN_3:
2394 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2395 break;
2396 }
2397 I915_WRITE(DP_TP_CTL(port), temp);
2398
2399 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2400 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2401
2402 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2403 case DP_TRAINING_PATTERN_DISABLE:
2404 *DP |= DP_LINK_TRAIN_OFF_CPT;
2405 break;
2406 case DP_TRAINING_PATTERN_1:
2407 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2408 break;
2409 case DP_TRAINING_PATTERN_2:
2410 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2411 break;
2412 case DP_TRAINING_PATTERN_3:
2413 DRM_ERROR("DP training pattern 3 not supported\n");
2414 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2415 break;
2416 }
2417
2418 } else {
2419 if (IS_CHERRYVIEW(dev))
2420 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2421 else
2422 *DP &= ~DP_LINK_TRAIN_MASK;
2423
2424 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2425 case DP_TRAINING_PATTERN_DISABLE:
2426 *DP |= DP_LINK_TRAIN_OFF;
2427 break;
2428 case DP_TRAINING_PATTERN_1:
2429 *DP |= DP_LINK_TRAIN_PAT_1;
2430 break;
2431 case DP_TRAINING_PATTERN_2:
2432 *DP |= DP_LINK_TRAIN_PAT_2;
2433 break;
2434 case DP_TRAINING_PATTERN_3:
2435 if (IS_CHERRYVIEW(dev)) {
2436 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2437 } else {
2438 DRM_ERROR("DP training pattern 3 not supported\n");
2439 *DP |= DP_LINK_TRAIN_PAT_2;
2440 }
2441 break;
2442 }
2443 }
2444 }
2445
2446 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2447 {
2448 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2449 struct drm_i915_private *dev_priv = dev->dev_private;
2450
2451 /* enable with pattern 1 (as per spec) */
2452 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2453 DP_TRAINING_PATTERN_1);
2454
2455 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2456 POSTING_READ(intel_dp->output_reg);
2457
2458 /*
2459 * Magic for VLV/CHV. We _must_ first set up the register
2460 * without actually enabling the port, and then do another
2461 * write to enable the port. Otherwise link training will
2462 * fail when the power sequencer is freshly used for this port.
2463 */
2464 intel_dp->DP |= DP_PORT_EN;
2465
2466 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2467 POSTING_READ(intel_dp->output_reg);
2468 }
2469
2470 static void intel_enable_dp(struct intel_encoder *encoder)
2471 {
2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473 struct drm_device *dev = encoder->base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
2475 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2476 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2477
2478 if (WARN_ON(dp_reg & DP_PORT_EN))
2479 return;
2480
2481 pps_lock(intel_dp);
2482
2483 if (IS_VALLEYVIEW(dev))
2484 vlv_init_panel_power_sequencer(intel_dp);
2485
2486 intel_dp_enable_port(intel_dp);
2487
2488 edp_panel_vdd_on(intel_dp);
2489 edp_panel_on(intel_dp);
2490 edp_panel_vdd_off(intel_dp, true);
2491
2492 pps_unlock(intel_dp);
2493
2494 if (IS_VALLEYVIEW(dev))
2495 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2496
2497 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2498 intel_dp_start_link_train(intel_dp);
2499 intel_dp_complete_link_train(intel_dp);
2500 intel_dp_stop_link_train(intel_dp);
2501
2502 if (crtc->config->has_audio) {
2503 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2504 pipe_name(crtc->pipe));
2505 intel_audio_codec_enable(encoder);
2506 }
2507 }
2508
2509 static void g4x_enable_dp(struct intel_encoder *encoder)
2510 {
2511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512
2513 intel_enable_dp(encoder);
2514 intel_edp_backlight_on(intel_dp);
2515 }
2516
2517 static void vlv_enable_dp(struct intel_encoder *encoder)
2518 {
2519 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520
2521 intel_edp_backlight_on(intel_dp);
2522 intel_psr_enable(intel_dp);
2523 }
2524
2525 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2526 {
2527 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2528 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2529
2530 intel_dp_prepare(encoder);
2531
2532 /* Only ilk+ has port A */
2533 if (dport->port == PORT_A) {
2534 ironlake_set_pll_cpu_edp(intel_dp);
2535 ironlake_edp_pll_on(intel_dp);
2536 }
2537 }
2538
2539 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2540 {
2541 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2542 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2543 enum pipe pipe = intel_dp->pps_pipe;
2544 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2545
2546 edp_panel_vdd_off_sync(intel_dp);
2547
2548 /*
2549 * VLV seems to get confused when multiple power sequencers
2550 * have the same port selected (even if only one has power/vdd
2551 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2552 * CHV, on the other hand, doesn't seem to mind having the same port
2553 * selected in multiple power sequencers, but let's always clear the
2554 * port select when logically disconnecting a power sequencer
2555 * from a port.
2556 */
2557 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2558 pipe_name(pipe), port_name(intel_dig_port->port));
2559 I915_WRITE(pp_on_reg, 0);
2560 POSTING_READ(pp_on_reg);
2561
2562 intel_dp->pps_pipe = INVALID_PIPE;
2563 }
2564
2565 static void vlv_steal_power_sequencer(struct drm_device *dev,
2566 enum pipe pipe)
2567 {
2568 struct drm_i915_private *dev_priv = dev->dev_private;
2569 struct intel_encoder *encoder;
2570
2571 lockdep_assert_held(&dev_priv->pps_mutex);
2572
2573 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2574 return;
2575
2576 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2577 base.head) {
2578 struct intel_dp *intel_dp;
2579 enum port port;
2580
2581 if (encoder->type != INTEL_OUTPUT_EDP)
2582 continue;
2583
2584 intel_dp = enc_to_intel_dp(&encoder->base);
2585 port = dp_to_dig_port(intel_dp)->port;
2586
2587 if (intel_dp->pps_pipe != pipe)
2588 continue;
2589
2590 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2591 pipe_name(pipe), port_name(port));
2592
2593 WARN(encoder->connectors_active,
2594 "stealing pipe %c power sequencer from active eDP port %c\n",
2595 pipe_name(pipe), port_name(port));
2596
2597 /* make sure vdd is off before we steal it */
2598 vlv_detach_power_sequencer(intel_dp);
2599 }
2600 }
2601
2602 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2603 {
2604 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2605 struct intel_encoder *encoder = &intel_dig_port->base;
2606 struct drm_device *dev = encoder->base.dev;
2607 struct drm_i915_private *dev_priv = dev->dev_private;
2608 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2609
2610 lockdep_assert_held(&dev_priv->pps_mutex);
2611
2612 if (!is_edp(intel_dp))
2613 return;
2614
2615 if (intel_dp->pps_pipe == crtc->pipe)
2616 return;
2617
2618 /*
2619 * If another power sequencer was being used on this
2620 * port previously make sure to turn off vdd there while
2621 * we still have control of it.
2622 */
2623 if (intel_dp->pps_pipe != INVALID_PIPE)
2624 vlv_detach_power_sequencer(intel_dp);
2625
2626 /*
2627 * We may be stealing the power
2628 * sequencer from another port.
2629 */
2630 vlv_steal_power_sequencer(dev, crtc->pipe);
2631
2632 /* now it's all ours */
2633 intel_dp->pps_pipe = crtc->pipe;
2634
2635 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2636 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2637
2638 /* init power sequencer on this pipe and port */
2639 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2640 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2641 }
2642
2643 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2644 {
2645 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2646 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2647 struct drm_device *dev = encoder->base.dev;
2648 struct drm_i915_private *dev_priv = dev->dev_private;
2649 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2650 enum dpio_channel port = vlv_dport_to_channel(dport);
2651 int pipe = intel_crtc->pipe;
2652 u32 val;
2653
2654 mutex_lock(&dev_priv->dpio_lock);
2655
2656 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
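/* note: the value just read is discarded; the register is rebuilt from scratch below */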
2657 val = 0;
2658 if (pipe)
2659 val |= (1<<21);
2660 else
2661 val &= ~(1<<21);
2662 val |= 0x001000c4;
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2664 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2665 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2666
2667 mutex_unlock(&dev_priv->dpio_lock);
2668
2669 intel_enable_dp(encoder);
2670 }
2671
2672 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2673 {
2674 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2675 struct drm_device *dev = encoder->base.dev;
2676 struct drm_i915_private *dev_priv = dev->dev_private;
2677 struct intel_crtc *intel_crtc =
2678 to_intel_crtc(encoder->base.crtc);
2679 enum dpio_channel port = vlv_dport_to_channel(dport);
2680 int pipe = intel_crtc->pipe;
2681
2682 intel_dp_prepare(encoder);
2683
2684 /* Program Tx lane resets to default */
2685 mutex_lock(&dev_priv->dpio_lock);
2686 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2687 DPIO_PCS_TX_LANE2_RESET |
2688 DPIO_PCS_TX_LANE1_RESET);
2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2690 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2691 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2692 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2693 DPIO_PCS_CLK_SOFT_RESET);
2694
2695 /* Fix up inter-pair skew failure */
2696 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2699 mutex_unlock(&dev_priv->dpio_lock);
2700 }
2701
2702 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2703 {
2704 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2705 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2706 struct drm_device *dev = encoder->base.dev;
2707 struct drm_i915_private *dev_priv = dev->dev_private;
2708 struct intel_crtc *intel_crtc =
2709 to_intel_crtc(encoder->base.crtc);
2710 enum dpio_channel ch = vlv_dport_to_channel(dport);
2711 int pipe = intel_crtc->pipe;
2712 int data, i;
2713 u32 val;
2714
2715 mutex_lock(&dev_priv->dpio_lock);
2716
2717 /* allow hardware to manage TX FIFO reset source */
2718 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2719 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2720 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2721
2722 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2723 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2724 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2725
2726 /* Deassert soft data lane reset */
2727 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2728 val |= CHV_PCS_REQ_SOFTRESET_EN;
2729 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2730
2731 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2732 val |= CHV_PCS_REQ_SOFTRESET_EN;
2733 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2734
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2736 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2737 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2738
2739 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2740 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2742
2743 /* Program Tx lane latency optimal setting */
2744 for (i = 0; i < 4; i++) {
2745 /* Set the upar bit */
2746 data = (i == 1) ? 0x0 : 0x1;
2747 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2748 data << DPIO_UPAR_SHIFT);
2749 }
2750
2751 /* Data lane stagger programming */
2752 /* FIXME: Fix up value only after power analysis */
2753
2754 mutex_unlock(&dev_priv->dpio_lock);
2755
2756 intel_enable_dp(encoder);
2757 }
2758
2759 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2760 {
2761 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2762 struct drm_device *dev = encoder->base.dev;
2763 struct drm_i915_private *dev_priv = dev->dev_private;
2764 struct intel_crtc *intel_crtc =
2765 to_intel_crtc(encoder->base.crtc);
2766 enum dpio_channel ch = vlv_dport_to_channel(dport);
2767 enum pipe pipe = intel_crtc->pipe;
2768 u32 val;
2769
2770 intel_dp_prepare(encoder);
2771
2772 mutex_lock(&dev_priv->dpio_lock);
2773
2774 /* program left/right clock distribution */
2775 if (pipe != PIPE_B) {
2776 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2777 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2778 if (ch == DPIO_CH0)
2779 val |= CHV_BUFLEFTENA1_FORCE;
2780 if (ch == DPIO_CH1)
2781 val |= CHV_BUFRIGHTENA1_FORCE;
2782 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2783 } else {
2784 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2785 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2786 if (ch == DPIO_CH0)
2787 val |= CHV_BUFLEFTENA2_FORCE;
2788 if (ch == DPIO_CH1)
2789 val |= CHV_BUFRIGHTENA2_FORCE;
2790 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2791 }
2792
2793 /* program clock channel usage */
2794 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2795 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2796 if (pipe != PIPE_B)
2797 val &= ~CHV_PCS_USEDCLKCHANNEL;
2798 else
2799 val |= CHV_PCS_USEDCLKCHANNEL;
2800 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2801
2802 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2803 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2804 if (pipe != PIPE_B)
2805 val &= ~CHV_PCS_USEDCLKCHANNEL;
2806 else
2807 val |= CHV_PCS_USEDCLKCHANNEL;
2808 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2809
2810 /*
2811 * This is a bit weird since generally the CL
2812 * matches the pipe, but here we need to
2813 * pick the CL based on the port.
2814 */
2815 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2816 if (pipe != PIPE_B)
2817 val &= ~CHV_CMN_USEDCLKCHANNEL;
2818 else
2819 val |= CHV_CMN_USEDCLKCHANNEL;
2820 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2821
2822 mutex_unlock(&dev_priv->dpio_lock);
2823 }
2824
2825 /*
2826 * Native read with retry for link status and receiver capability reads for
2827 * cases where the sink may still be asleep.
2828 *
2829 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2830 * supposed to retry 3 times per the spec.
2831 */
2832 static ssize_t
2833 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2834 void *buffer, size_t size)
2835 {
2836 ssize_t ret;
2837 int i;
2838
2839 /*
2840 * Sometimes we just get the same incorrect byte repeated
2841 * over the entire buffer. Doing just one throwaway read
2842 * initially seems to "solve" it.
2843 */
2844 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2845
2846 for (i = 0; i < 3; i++) {
2847 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2848 if (ret == size)
2849 return ret;
2850 msleep(1);
2851 }
2852
2853 return ret;
2854 }
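
/*
 * Callers such as intel_dp_get_link_status() below route their DPCD
 * reads through the wake-retry helper above so they survive a sink
 * that is still waking up.
 */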
2855
2856 /*
2857 * Fetch AUX CH registers 0x202 - 0x207 which contain
2858 * link status information
2859 */
2860 static bool
2861 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2862 {
2863 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2864 DP_LANE0_1_STATUS,
2865 link_status,
2866 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2867 }
2868
2869 /* These are source-specific values. */
2870 static uint8_t
2871 intel_dp_voltage_max(struct intel_dp *intel_dp)
2872 {
2873 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2874 struct drm_i915_private *dev_priv = dev->dev_private;
2875 enum port port = dp_to_dig_port(intel_dp)->port;
2876
2877 if (INTEL_INFO(dev)->gen >= 9) {
2878 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2879 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2880 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2881 } else if (IS_VALLEYVIEW(dev))
2882 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2883 else if (IS_GEN7(dev) && port == PORT_A)
2884 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2885 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2886 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2887 else
2888 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2889 }
2890
2891 static uint8_t
2892 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2893 {
2894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2895 enum port port = dp_to_dig_port(intel_dp)->port;
2896
2897 if (INTEL_INFO(dev)->gen >= 9) {
2898 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2900 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2901 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2902 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2904 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2905 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2906 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2907 default:
2908 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2909 }
2910 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2911 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2912 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2913 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2914 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2915 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2916 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2917 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2919 default:
2920 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2921 }
2922 } else if (IS_VALLEYVIEW(dev)) {
2923 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2924 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2925 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2927 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2928 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2929 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2930 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2931 default:
2932 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2933 }
2934 } else if (IS_GEN7(dev) && port == PORT_A) {
2935 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2936 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2937 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2938 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2940 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2941 default:
2942 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2943 }
2944 } else {
2945 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2946 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2947 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2949 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2951 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2953 default:
2954 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2955 }
2956 }
2957 }
2958
2959 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2960 {
2961 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2962 struct drm_i915_private *dev_priv = dev->dev_private;
2963 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2964 struct intel_crtc *intel_crtc =
2965 to_intel_crtc(dport->base.base.crtc);
2966 unsigned long demph_reg_value, preemph_reg_value,
2967 uniqtranscale_reg_value;
2968 uint8_t train_set = intel_dp->train_set[0];
2969 enum dpio_channel port = vlv_dport_to_channel(dport);
2970 int pipe = intel_crtc->pipe;
2971
2972 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2973 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2974 preemph_reg_value = 0x0004000;
2975 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2977 demph_reg_value = 0x2B405555;
2978 uniqtranscale_reg_value = 0x552AB83A;
2979 break;
2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2981 demph_reg_value = 0x2B404040;
2982 uniqtranscale_reg_value = 0x5548B83A;
2983 break;
2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2985 demph_reg_value = 0x2B245555;
2986 uniqtranscale_reg_value = 0x5560B83A;
2987 break;
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2989 demph_reg_value = 0x2B405555;
2990 uniqtranscale_reg_value = 0x5598DA3A;
2991 break;
2992 default:
2993 return 0;
2994 }
2995 break;
2996 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2997 preemph_reg_value = 0x0002000;
2998 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2999 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3000 demph_reg_value = 0x2B404040;
3001 uniqtranscale_reg_value = 0x5552B83A;
3002 break;
3003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3004 demph_reg_value = 0x2B404848;
3005 uniqtranscale_reg_value = 0x5580B83A;
3006 break;
3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3008 demph_reg_value = 0x2B404040;
3009 uniqtranscale_reg_value = 0x55ADDA3A;
3010 break;
3011 default:
3012 return 0;
3013 }
3014 break;
3015 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3016 preemph_reg_value = 0x0000000;
3017 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3019 demph_reg_value = 0x2B305555;
3020 uniqtranscale_reg_value = 0x5570B83A;
3021 break;
3022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3023 demph_reg_value = 0x2B2B4040;
3024 uniqtranscale_reg_value = 0x55ADDA3A;
3025 break;
3026 default:
3027 return 0;
3028 }
3029 break;
3030 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3031 preemph_reg_value = 0x0006000;
3032 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3034 demph_reg_value = 0x1B405555;
3035 uniqtranscale_reg_value = 0x55ADDA3A;
3036 break;
3037 default:
3038 return 0;
3039 }
3040 break;
3041 default:
3042 return 0;
3043 }
3044
3045 mutex_lock(&dev_priv->dpio_lock);
3046 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3047 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3048 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3049 uniqtranscale_reg_value);
3050 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3051 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3052 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3053 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3054 mutex_unlock(&dev_priv->dpio_lock);
3055
3056 return 0;
3057 }
3058
3059 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3060 {
3061 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3062 struct drm_i915_private *dev_priv = dev->dev_private;
3063 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3064 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3065 u32 deemph_reg_value, margin_reg_value, val;
3066 uint8_t train_set = intel_dp->train_set[0];
3067 enum dpio_channel ch = vlv_dport_to_channel(dport);
3068 enum pipe pipe = intel_crtc->pipe;
3069 int i;
3070
3071 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3072 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3073 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3075 deemph_reg_value = 128;
3076 margin_reg_value = 52;
3077 break;
3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3079 deemph_reg_value = 128;
3080 margin_reg_value = 77;
3081 break;
3082 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3083 deemph_reg_value = 128;
3084 margin_reg_value = 102;
3085 break;
3086 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3087 deemph_reg_value = 128;
3088 margin_reg_value = 154;
3089 /* FIXME extra to set for 1200 */
3090 break;
3091 default:
3092 return 0;
3093 }
3094 break;
3095 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3096 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3097 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3098 deemph_reg_value = 85;
3099 margin_reg_value = 78;
3100 break;
3101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3102 deemph_reg_value = 85;
3103 margin_reg_value = 116;
3104 break;
3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3106 deemph_reg_value = 85;
3107 margin_reg_value = 154;
3108 break;
3109 default:
3110 return 0;
3111 }
3112 break;
3113 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3114 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3116 deemph_reg_value = 64;
3117 margin_reg_value = 104;
3118 break;
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3120 deemph_reg_value = 64;
3121 margin_reg_value = 154;
3122 break;
3123 default:
3124 return 0;
3125 }
3126 break;
3127 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3128 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3130 deemph_reg_value = 43;
3131 margin_reg_value = 154;
3132 break;
3133 default:
3134 return 0;
3135 }
3136 break;
3137 default:
3138 return 0;
3139 }
3140
3141 mutex_lock(&dev_priv->dpio_lock);
3142
3143 /* Clear calc init */
3144 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3145 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3146 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3147 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3148 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3149
3150 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3151 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3152 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3153 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3154 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3155
3156 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3157 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3158 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3159 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3160
3161 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3162 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3163 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3165
3166 /* Program swing deemph */
3167 for (i = 0; i < 4; i++) {
3168 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3169 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3170 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3171 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3172 }
3173
3174 /* Program swing margin */
3175 for (i = 0; i < 4; i++) {
3176 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3177 val &= ~DPIO_SWING_MARGIN000_MASK;
3178 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3179 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3180 }
3181
3182 /* Disable unique transition scale */
3183 for (i = 0; i < 4; i++) {
3184 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3185 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3186 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3187 }
3188
3189 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3190 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3191 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3192 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3193
3194 /*
3195 * The document said it needs to set bit 27 for ch0 and bit 26
3196 * for ch1. Might be a typo in the doc.
3197 * For now, for this unique transition scale selection, set bit
3198 * 27 for ch0 and ch1.
3199 */
3200 for (i = 0; i < 4; i++) {
3201 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3202 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3203 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3204 }
3205
3206 for (i = 0; i < 4; i++) {
3207 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3208 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3209 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3210 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3211 }
3212 }
3213
3214 /* Start swing calculation */
3215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3216 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3217 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3218
3219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3220 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3221 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3222
3223 /* LRC Bypass */
3224 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3225 val |= DPIO_LRC_BYPASS;
3226 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3227
3228 mutex_unlock(&dev_priv->dpio_lock);
3229
3230 return 0;
3231 }
3232
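/*
 * In short: take the highest voltage swing and pre-emphasis requested
 * across all active lanes, clamp each to this source's maximum, and set
 * the DP_TRAIN_MAX_*_REACHED flags when a cap is hit so the sink stops
 * asking for more.
 */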
3233 static void
3234 intel_get_adjust_train(struct intel_dp *intel_dp,
3235 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3236 {
3237 uint8_t v = 0;
3238 uint8_t p = 0;
3239 int lane;
3240 uint8_t voltage_max;
3241 uint8_t preemph_max;
3242
3243 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3244 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3245 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3246
3247 if (this_v > v)
3248 v = this_v;
3249 if (this_p > p)
3250 p = this_p;
3251 }
3252
3253 voltage_max = intel_dp_voltage_max(intel_dp);
3254 if (v >= voltage_max)
3255 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3256
3257 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3258 if (p >= preemph_max)
3259 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3260
3261 for (lane = 0; lane < 4; lane++)
3262 intel_dp->train_set[lane] = v | p;
3263 }
3264
3265 static uint32_t
3266 intel_gen4_signal_levels(uint8_t train_set)
3267 {
3268 uint32_t signal_levels = 0;
3269
3270 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3272 default:
3273 signal_levels |= DP_VOLTAGE_0_4;
3274 break;
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3276 signal_levels |= DP_VOLTAGE_0_6;
3277 break;
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3279 signal_levels |= DP_VOLTAGE_0_8;
3280 break;
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3282 signal_levels |= DP_VOLTAGE_1_2;
3283 break;
3284 }
3285 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3286 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3287 default:
3288 signal_levels |= DP_PRE_EMPHASIS_0;
3289 break;
3290 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3291 signal_levels |= DP_PRE_EMPHASIS_3_5;
3292 break;
3293 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3294 signal_levels |= DP_PRE_EMPHASIS_6;
3295 break;
3296 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3297 signal_levels |= DP_PRE_EMPHASIS_9_5;
3298 break;
3299 }
3300 return signal_levels;
3301 }
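
/*
 * Reading the mapping above, e.g. train_set with voltage swing level 1
 * and pre-emphasis level 2 yields DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6,
 * i.e. a 0.6 V swing with 6 dB pre-emphasis on gen4 hardware.
 */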
3302
3303 /* Gen6's DP voltage swing and pre-emphasis control */
3304 static uint32_t
3305 intel_gen6_edp_signal_levels(uint8_t train_set)
3306 {
3307 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3308 DP_TRAIN_PRE_EMPHASIS_MASK);
3309 switch (signal_levels) {
3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3312 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3314 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3317 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3320 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3323 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3324 default:
3325 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3326 "0x%x\n", signal_levels);
3327 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3328 }
3329 }
3330
3331 /* Gen7's DP voltage swing and pre-emphasis control */
3332 static uint32_t
3333 intel_gen7_edp_signal_levels(uint8_t train_set)
3334 {
3335 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3336 DP_TRAIN_PRE_EMPHASIS_MASK);
3337 switch (signal_levels) {
3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3339 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3341 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3343 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3344
3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3346 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3348 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3349
3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3351 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3353 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3354
3355 default:
3356 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3357 "0x%x\n", signal_levels);
3358 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3359 }
3360 }
3361
3362 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3363 static uint32_t
3364 intel_hsw_signal_levels(uint8_t train_set)
3365 {
3366 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3367 DP_TRAIN_PRE_EMPHASIS_MASK);
3368 switch (signal_levels) {
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3370 return DDI_BUF_TRANS_SELECT(0);
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3372 return DDI_BUF_TRANS_SELECT(1);
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3374 return DDI_BUF_TRANS_SELECT(2);
3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3376 return DDI_BUF_TRANS_SELECT(3);
3377
3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3379 return DDI_BUF_TRANS_SELECT(4);
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3381 return DDI_BUF_TRANS_SELECT(5);
3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3383 return DDI_BUF_TRANS_SELECT(6);
3384
3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3386 return DDI_BUF_TRANS_SELECT(7);
3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3388 return DDI_BUF_TRANS_SELECT(8);
3389
3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3391 return DDI_BUF_TRANS_SELECT(9);
3392 default:
3393 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3394 "0x%x\n", signal_levels);
3395 return DDI_BUF_TRANS_SELECT(0);
3396 }
3397 }
3398
3399 /* Properly updates "DP" with the correct signal levels. */
3400 static void
3401 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3402 {
3403 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3404 enum port port = intel_dig_port->port;
3405 struct drm_device *dev = intel_dig_port->base.base.dev;
3406 uint32_t signal_levels, mask;
3407 uint8_t train_set = intel_dp->train_set[0];
3408
3409 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3410 signal_levels = intel_hsw_signal_levels(train_set);
3411 mask = DDI_BUF_EMP_MASK;
3412 } else if (IS_CHERRYVIEW(dev)) {
3413 signal_levels = intel_chv_signal_levels(intel_dp);
3414 mask = 0;
3415 } else if (IS_VALLEYVIEW(dev)) {
3416 signal_levels = intel_vlv_signal_levels(intel_dp);
3417 mask = 0;
3418 } else if (IS_GEN7(dev) && port == PORT_A) {
3419 signal_levels = intel_gen7_edp_signal_levels(train_set);
3420 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3421 } else if (IS_GEN6(dev) && port == PORT_A) {
3422 signal_levels = intel_gen6_edp_signal_levels(train_set);
3423 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3424 } else {
3425 signal_levels = intel_gen4_signal_levels(train_set);
3426 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3427 }
3428
3429 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3430
3431 *DP = (*DP & ~mask) | signal_levels;
3432 }
3433
3434 static bool
3435 intel_dp_set_link_train(struct intel_dp *intel_dp,
3436 uint32_t *DP,
3437 uint8_t dp_train_pat)
3438 {
3439 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3440 struct drm_device *dev = intel_dig_port->base.base.dev;
3441 struct drm_i915_private *dev_priv = dev->dev_private;
3442 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3443 int ret, len;
3444
3445 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3446
3447 I915_WRITE(intel_dp->output_reg, *DP);
3448 POSTING_READ(intel_dp->output_reg);
3449
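/*
 * In the DPCD, DP_TRAINING_LANEx_SET immediately follows
 * DP_TRAINING_PATTERN_SET, so the pattern and the per-lane drive
 * settings can be written in a single burst below.
 */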
3450 buf[0] = dp_train_pat;
3451 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3452 DP_TRAINING_PATTERN_DISABLE) {
3453 /* don't write DP_TRAINING_LANEx_SET on disable */
3454 len = 1;
3455 } else {
3456 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3457 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3458 len = intel_dp->lane_count + 1;
3459 }
3460
3461 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3462 buf, len);
3463
3464 return ret == len;
3465 }
3466
3467 static bool
3468 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3469 uint8_t dp_train_pat)
3470 {
3471 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3472 intel_dp_set_signal_levels(intel_dp, DP);
3473 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3474 }
3475
3476 static bool
3477 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3478 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3479 {
3480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3481 struct drm_device *dev = intel_dig_port->base.base.dev;
3482 struct drm_i915_private *dev_priv = dev->dev_private;
3483 int ret;
3484
3485 intel_get_adjust_train(intel_dp, link_status);
3486 intel_dp_set_signal_levels(intel_dp, DP);
3487
3488 I915_WRITE(intel_dp->output_reg, *DP);
3489 POSTING_READ(intel_dp->output_reg);
3490
3491 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3492 intel_dp->train_set, intel_dp->lane_count);
3493
3494 return ret == intel_dp->lane_count;
3495 }
3496
3497 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3498 {
3499 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3500 struct drm_device *dev = intel_dig_port->base.base.dev;
3501 struct drm_i915_private *dev_priv = dev->dev_private;
3502 enum port port = intel_dig_port->port;
3503 uint32_t val;
3504
3505 if (!HAS_DDI(dev))
3506 return;
3507
3508 val = I915_READ(DP_TP_CTL(port));
3509 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3510 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3511 I915_WRITE(DP_TP_CTL(port), val);
3512
3513 /*
3514 * On PORT_A we can have only eDP in SST mode. There, the only reason
3515 * we need to set idle transmission mode is to work around a HW issue
3516 * where we enable the pipe while not in idle link-training mode.
3517 * In this case we are required to wait for a minimum number of
3518 * idle patterns to be sent.
3519 */
3520 if (port == PORT_A)
3521 return;
3522
3523 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3524 1))
3525 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3526 }
3527
3528 /* Enable corresponding port and start training pattern 1 */
3529 void
3530 intel_dp_start_link_train(struct intel_dp *intel_dp)
3531 {
3532 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3533 struct drm_device *dev = encoder->dev;
3534 int i;
3535 uint8_t voltage;
3536 int voltage_tries, loop_tries;
3537 uint32_t DP = intel_dp->DP;
3538 uint8_t link_config[2];
3539
3540 if (HAS_DDI(dev))
3541 intel_ddi_prepare_link_retrain(encoder);
3542
3543 /* Write the link configuration data */
3544 link_config[0] = intel_dp->link_bw;
3545 link_config[1] = intel_dp->lane_count;
3546 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3547 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3548 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3549 if (intel_dp->num_sink_rates)
3550 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3551 &intel_dp->rate_select, 1);
3552
3553 link_config[0] = 0;
3554 link_config[1] = DP_SET_ANSI_8B10B;
3555 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3556
3557 DP |= DP_PORT_EN;
3558
3559 /* clock recovery */
3560 if (!intel_dp_reset_link_train(intel_dp, &DP,
3561 DP_TRAINING_PATTERN_1 |
3562 DP_LINK_SCRAMBLING_DISABLE)) {
3563 DRM_ERROR("failed to enable link training\n");
3564 return;
3565 }
3566
3567 voltage = 0xff;
3568 voltage_tries = 0;
3569 loop_tries = 0;
3570 for (;;) {
3571 uint8_t link_status[DP_LINK_STATUS_SIZE];
3572
3573 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3574 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3575 DRM_ERROR("failed to get link status\n");
3576 break;
3577 }
3578
3579 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3580 DRM_DEBUG_KMS("clock recovery OK\n");
3581 break;
3582 }
3583
3584 /* Check to see if we've tried the max voltage */
3585 for (i = 0; i < intel_dp->lane_count; i++)
3586 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3587 break;
3588 if (i == intel_dp->lane_count) {
3589 ++loop_tries;
3590 if (loop_tries == 5) {
3591 DRM_ERROR("too many full retries, give up\n");
3592 break;
3593 }
3594 intel_dp_reset_link_train(intel_dp, &DP,
3595 DP_TRAINING_PATTERN_1 |
3596 DP_LINK_SCRAMBLING_DISABLE);
3597 voltage_tries = 0;
3598 continue;
3599 }
3600
3601 /* Check to see if we've tried the same voltage 5 times */
3602 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3603 ++voltage_tries;
3604 if (voltage_tries == 5) {
3605 DRM_ERROR("too many voltage retries, give up\n");
3606 break;
3607 }
3608 } else
3609 voltage_tries = 0;
3610 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3611
3612 /* Update training set as requested by target */
3613 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3614 DRM_ERROR("failed to update link training\n");
3615 break;
3616 }
3617 }
3618
3619 intel_dp->DP = DP;
3620 }
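
/*
 * Condensed pseudocode sketch of the clock-recovery policy implemented
 * above (illustrative summary, not a second implementation):
 *
 *	for (;;) {
 *		read link_status via DPCD;
 *		if (drm_dp_clock_recovery_ok())
 *			break;				// CR done
 *		if (all lanes report DP_TRAIN_MAX_SWING_REACHED)
 *			restart from TPS1;		// at most 5 restarts
 *		else if (sink asked for the same voltage swing 5 times)
 *			give up;
 *		else
 *			write the requested swing/pre-emphasis and retry;
 *	}
 */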
3621
3622 void
3623 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3624 {
3625 bool channel_eq = false;
3626 int tries, cr_tries;
3627 uint32_t DP = intel_dp->DP;
3628 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3629
3630 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3631 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3632 training_pattern = DP_TRAINING_PATTERN_3;
3633
3634 /* channel equalization */
3635 if (!intel_dp_set_link_train(intel_dp, &DP,
3636 training_pattern |
3637 DP_LINK_SCRAMBLING_DISABLE)) {
3638 DRM_ERROR("failed to start channel equalization\n");
3639 return;
3640 }
3641
3642 tries = 0;
3643 cr_tries = 0;
3644 channel_eq = false;
3645 for (;;) {
3646 uint8_t link_status[DP_LINK_STATUS_SIZE];
3647
3648 if (cr_tries > 5) {
3649 DRM_ERROR("failed to train DP, aborting\n");
3650 break;
3651 }
3652
3653 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3654 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3655 DRM_ERROR("failed to get link status\n");
3656 break;
3657 }
3658
3659 /* Make sure clock is still ok */
3660 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3661 intel_dp_start_link_train(intel_dp);
3662 intel_dp_set_link_train(intel_dp, &DP,
3663 training_pattern |
3664 DP_LINK_SCRAMBLING_DISABLE);
3665 cr_tries++;
3666 continue;
3667 }
3668
3669 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3670 channel_eq = true;
3671 break;
3672 }
3673
3674 /* Try 5 times, then try clock recovery if that fails */
3675 if (tries > 5) {
3676 intel_dp_start_link_train(intel_dp);
3677 intel_dp_set_link_train(intel_dp, &DP,
3678 training_pattern |
3679 DP_LINK_SCRAMBLING_DISABLE);
3680 tries = 0;
3681 cr_tries++;
3682 continue;
3683 }
3684
3685 /* Update training set as requested by target */
3686 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3687 DRM_ERROR("failed to update link training\n");
3688 break;
3689 }
3690 ++tries;
3691 }
3692
3693 intel_dp_set_idle_link_train(intel_dp);
3694
3695 intel_dp->DP = DP;
3696
3697 if (channel_eq)
3698 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3699
3700 }
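
/*
 * Condensed pseudocode sketch of the channel-equalization policy above
 * (illustrative summary):
 *
 *	for (;;) {
 *		if (more than 5 full retrains)
 *			abort;
 *		read link_status via DPCD;
 *		if (clock recovery was lost)
 *			redo full training from TPS1;	// counts as a retrain
 *		else if (drm_dp_channel_eq_ok())
 *			break;				// EQ done
 *		else if (more than 5 EQ-only updates)
 *			redo full training from TPS1;	// counts as a retrain
 *		else
 *			apply the sink-requested drive settings and retry;
 *	}
 */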
3701
3702 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3703 {
3704 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3705 DP_TRAINING_PATTERN_DISABLE);
3706 }
3707
3708 static void
3709 intel_dp_link_down(struct intel_dp *intel_dp)
3710 {
3711 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3712 enum port port = intel_dig_port->port;
3713 struct drm_device *dev = intel_dig_port->base.base.dev;
3714 struct drm_i915_private *dev_priv = dev->dev_private;
3715 uint32_t DP = intel_dp->DP;
3716
3717 if (WARN_ON(HAS_DDI(dev)))
3718 return;
3719
3720 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3721 return;
3722
3723 DRM_DEBUG_KMS("\n");
3724
3725 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3726 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3727 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3728 } else {
3729 if (IS_CHERRYVIEW(dev))
3730 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3731 else
3732 DP &= ~DP_LINK_TRAIN_MASK;
3733 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3734 }
3735 POSTING_READ(intel_dp->output_reg);
3736
3737 if (HAS_PCH_IBX(dev) &&
3738 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3739 /* Hardware workaround: leaving our transcoder select
3740 * set to transcoder B while it's off will prevent the
3741 * corresponding HDMI output on transcoder A.
3742 *
3743 * Combine this with another hardware workaround:
3744 * transcoder select bit can only be cleared while the
3745 * port is enabled.
3746 */
3747 DP &= ~DP_PIPEB_SELECT;
3748 I915_WRITE(intel_dp->output_reg, DP);
3749 POSTING_READ(intel_dp->output_reg);
3750 }
3751
3752 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3753 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3754 POSTING_READ(intel_dp->output_reg);
3755 msleep(intel_dp->panel_power_down_delay);
3756 }
3757
3758 static bool
3759 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3760 {
3761 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3762 struct drm_device *dev = dig_port->base.base.dev;
3763 struct drm_i915_private *dev_priv = dev->dev_private;
3764 uint8_t rev;
3765
3766 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3767 sizeof(intel_dp->dpcd)) < 0)
3768 return false; /* aux transfer failed */
3769
3770 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3771
3772 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3773 return false; /* DPCD not present */
3774
3775 /* Check if the panel supports PSR */
3776 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3777 if (is_edp(intel_dp)) {
3778 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3779 intel_dp->psr_dpcd,
3780 sizeof(intel_dp->psr_dpcd));
3781 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3782 dev_priv->psr.sink_support = true;
3783 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3784 }
3785 }
3786
3787 /* Training Pattern 3 support, both source and sink */
3788 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3789 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3790 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3791 intel_dp->use_tps3 = true;
3792 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3793 } else
3794 intel_dp->use_tps3 = false;
3795
3796 /* Intermediate frequency support */
3797 if (is_edp(intel_dp) &&
3798 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3799 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3800 (rev >= 0x03)) { /* eDP v1.4 or higher */
3801 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3802 int i;
3803
3804 intel_dp_dpcd_read_wake(&intel_dp->aux,
3805 DP_SUPPORTED_LINK_RATES,
3806 sink_rates,
3807 sizeof(sink_rates));
3808
3809 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3810 int val = le16_to_cpu(sink_rates[i]);
3811
3812 if (val == 0)
3813 break;
3814
3815 intel_dp->sink_rates[i] = val * 200;
3816 }
3817 intel_dp->num_sink_rates = i;
3818 }
3819
3820 intel_dp_print_rates(intel_dp);
3821
3822 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3823 DP_DWN_STRM_PORT_PRESENT))
3824 return true; /* native DP sink */
3825
3826 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3827 return true; /* no per-port downstream info */
3828
3829 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3830 intel_dp->downstream_ports,
3831 DP_MAX_DOWNSTREAM_PORTS) < 0)
3832 return false; /* downstream port status fetch failed */
3833
3834 return true;
3835 }
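
/*
 * Worked example for the DP_SUPPORTED_LINK_RATES parsing above: each
 * table entry is a little-endian 16-bit value in units of 200 kHz, and
 * the driver stops at the first zero entry. A hypothetical eDP 1.4
 * panel advertising 1.62 GHz and 2.7 GHz link rates would report:
 *
 *	sink_rates[0] = cpu_to_le16(810);	// 810 * 200 kHz = 162000 kHz
 *	sink_rates[1] = cpu_to_le16(1350);	// 1350 * 200 kHz = 270000 kHz
 *	sink_rates[2] = 0;			// terminator
 *
 * so intel_dp->sink_rates ends up as { 162000, 270000 } with
 * num_sink_rates == 2.
 */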
3836
3837 static void
3838 intel_dp_probe_oui(struct intel_dp *intel_dp)
3839 {
3840 u8 buf[3];
3841
3842 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3843 return;
3844
3845 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3846 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3847 buf[0], buf[1], buf[2]);
3848
3849 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3850 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3851 buf[0], buf[1], buf[2]);
3852 }
3853
3854 static bool
3855 intel_dp_probe_mst(struct intel_dp *intel_dp)
3856 {
3857 u8 buf[1];
3858
3859 if (!intel_dp->can_mst)
3860 return false;
3861
3862 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3863 return false;
3864
3865 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
3866 if (buf[0] & DP_MST_CAP) {
3867 DRM_DEBUG_KMS("Sink is MST capable\n");
3868 intel_dp->is_mst = true;
3869 } else {
3870 DRM_DEBUG_KMS("Sink is not MST capable\n");
3871 intel_dp->is_mst = false;
3872 }
3873 }
3874
3875 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3876 return intel_dp->is_mst;
3877 }
3878
3879 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3880 {
3881 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3882 struct drm_device *dev = intel_dig_port->base.base.dev;
3883 struct intel_crtc *intel_crtc =
3884 to_intel_crtc(intel_dig_port->base.base.crtc);
3885 u8 buf;
3886 int test_crc_count;
3887 int attempts = 6;
3888
3889 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3890 return -EIO;
3891
3892 if (!(buf & DP_TEST_CRC_SUPPORTED))
3893 return -ENOTTY;
3894
3895 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3896 return -EIO;
3897
3898 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3899 buf | DP_TEST_SINK_START) < 0)
3900 return -EIO;
3901
3902 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3903 return -EIO;
3904 test_crc_count = buf & DP_TEST_COUNT_MASK;
3905
3906 do {
3907 if (drm_dp_dpcd_readb(&intel_dp->aux,
3908 DP_TEST_SINK_MISC, &buf) < 0)
3909 return -EIO;
3910 intel_wait_for_vblank(dev, intel_crtc->pipe);
3911 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3912
3913 if (attempts == 0) {
3914 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3915 return -ETIMEDOUT;
3916 }
3917
3918 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3919 return -EIO;
3920
3921 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3922 return -EIO;
3923 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3924 buf & ~DP_TEST_SINK_START) < 0)
3925 return -EIO;
3926
3927 return 0;
3928 }
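
/*
 * Hypothetical usage sketch for intel_dp_sink_crc() (the caller below is
 * illustrative, not from this file): the sink returns six bytes starting
 * at DP_TEST_CRC_R_CR, one 16-bit CRC per colour component.
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %6phN\n", crc);
 */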
3929
3930 static bool
3931 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3932 {
3933 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3934 DP_DEVICE_SERVICE_IRQ_VECTOR,
3935 sink_irq_vector, 1) == 1;
3936 }
3937
3938 static bool
3939 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3940 {
3941 int ret;
3942
3943 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3944 DP_SINK_COUNT_ESI,
3945 sink_irq_vector, 14);
3946 if (ret != 14)
3947 return false;
3948
3949 return true;
3950 }
3951
3952 static void
3953 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3954 {
3955 /* NAK by default */
3956 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3957 }
3958
3959 static int
3960 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3961 {
3962 bool bret;
3963
3964 if (intel_dp->is_mst) {
3965 u8 esi[16] = { 0 };
3966 int ret = 0;
3967 int retry;
3968 bool handled;
3969 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3970 go_again:
3971 if (bret == true) {
3972
3973 /* check link status - esi[10] is DPCD 0x200c (DP_LANE0_1_STATUS_ESI) */
3974 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3975 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3976 intel_dp_start_link_train(intel_dp);
3977 intel_dp_complete_link_train(intel_dp);
3978 intel_dp_stop_link_train(intel_dp);
3979 }
3980
3981 DRM_DEBUG_KMS("got esi %3ph\n", esi);
3982 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3983
3984 if (handled) {
3985 for (retry = 0; retry < 3; retry++) {
3986 int wret;
3987 wret = drm_dp_dpcd_write(&intel_dp->aux,
3988 DP_SINK_COUNT_ESI+1,
3989 &esi[1], 3);
3990 if (wret == 3) {
3991 break;
3992 }
3993 }
3994
3995 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3996 if (bret == true) {
3997 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
3998 goto go_again;
3999 }
4000 } else
4001 ret = 0;
4002
4003 return ret;
4004 } else {
4005 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4006 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4007 intel_dp->is_mst = false;
4008 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4009 /* send a hotplug event */
4010 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4011 }
4012 }
4013 return -EINVAL;
4014 }
4015
4016 /*
4017 * According to DP spec
4018 * 5.1.2:
4019 * 1. Read DPCD
4020 * 2. Configure link according to Receiver Capabilities
4021 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4022 * 4. Check link status on receipt of hot-plug interrupt
4023 */
4024 static void
4025 intel_dp_check_link_status(struct intel_dp *intel_dp)
4026 {
4027 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4028 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4029 u8 sink_irq_vector;
4030 u8 link_status[DP_LINK_STATUS_SIZE];
4031
4032 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4033
4034 if (!intel_encoder->connectors_active)
4035 return;
4036
4037 if (WARN_ON(!intel_encoder->base.crtc))
4038 return;
4039
4040 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4041 return;
4042
4043 /* Try to read receiver status if the link appears to be up */
4044 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4045 return;
4046 }
4047
4048 /* Now read the DPCD to see if it's actually running */
4049 if (!intel_dp_get_dpcd(intel_dp)) {
4050 return;
4051 }
4052
4053 /* Try to read the source of the interrupt */
4054 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4055 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4056 /* Clear interrupt source */
4057 drm_dp_dpcd_writeb(&intel_dp->aux,
4058 DP_DEVICE_SERVICE_IRQ_VECTOR,
4059 sink_irq_vector);
4060
4061 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4062 intel_dp_handle_test_request(intel_dp);
4063 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4064 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4065 }
4066
4067 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4068 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4069 intel_encoder->base.name);
4070 intel_dp_start_link_train(intel_dp);
4071 intel_dp_complete_link_train(intel_dp);
4072 intel_dp_stop_link_train(intel_dp);
4073 }
4074 }
4075
4076 /* XXX this is probably wrong for multiple downstream ports */
4077 static enum drm_connector_status
4078 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4079 {
4080 uint8_t *dpcd = intel_dp->dpcd;
4081 uint8_t type;
4082
4083 if (!intel_dp_get_dpcd(intel_dp))
4084 return connector_status_disconnected;
4085
4086 /* if there's no downstream port, we're done */
4087 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4088 return connector_status_connected;
4089
4090 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4091 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4092 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4093 uint8_t reg;
4094
4095 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4096 &reg, 1) < 0)
4097 return connector_status_unknown;
4098
4099 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4100 : connector_status_disconnected;
4101 }
4102
4103 /* If no HPD, poke DDC gently */
4104 if (drm_probe_ddc(&intel_dp->aux.ddc))
4105 return connector_status_connected;
4106
4107 /* Well we tried, say unknown for unreliable port types */
4108 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4109 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4110 if (type == DP_DS_PORT_TYPE_VGA ||
4111 type == DP_DS_PORT_TYPE_NON_EDID)
4112 return connector_status_unknown;
4113 } else {
4114 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4115 DP_DWN_STRM_PORT_TYPE_MASK;
4116 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4117 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4118 return connector_status_unknown;
4119 }
4120
4121 /* Anything else is out of spec, warn and ignore */
4122 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4123 return connector_status_disconnected;
4124 }
4125
4126 static enum drm_connector_status
4127 edp_detect(struct intel_dp *intel_dp)
4128 {
4129 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4130 enum drm_connector_status status;
4131
4132 status = intel_panel_detect(dev);
4133 if (status == connector_status_unknown)
4134 status = connector_status_connected;
4135
4136 return status;
4137 }
4138
4139 static enum drm_connector_status
4140 ironlake_dp_detect(struct intel_dp *intel_dp)
4141 {
4142 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4143 struct drm_i915_private *dev_priv = dev->dev_private;
4144 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4145
4146 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4147 return connector_status_disconnected;
4148
4149 return intel_dp_detect_dpcd(intel_dp);
4150 }
4151
4152 static int g4x_digital_port_connected(struct drm_device *dev,
4153 struct intel_digital_port *intel_dig_port)
4154 {
4155 struct drm_i915_private *dev_priv = dev->dev_private;
4156 uint32_t bit;
4157
4158 if (IS_VALLEYVIEW(dev)) {
4159 switch (intel_dig_port->port) {
4160 case PORT_B:
4161 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4162 break;
4163 case PORT_C:
4164 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4165 break;
4166 case PORT_D:
4167 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4168 break;
4169 default:
4170 return -EINVAL;
4171 }
4172 } else {
4173 switch (intel_dig_port->port) {
4174 case PORT_B:
4175 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4176 break;
4177 case PORT_C:
4178 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4179 break;
4180 case PORT_D:
4181 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4182 break;
4183 default:
4184 return -EINVAL;
4185 }
4186 }
4187
4188 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4189 return 0;
4190 return 1;
4191 }
4192
4193 static enum drm_connector_status
4194 g4x_dp_detect(struct intel_dp *intel_dp)
4195 {
4196 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4197 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4198 int ret;
4199
4200 /* Can't disconnect eDP, but you can close the lid... */
4201 if (is_edp(intel_dp)) {
4202 enum drm_connector_status status;
4203
4204 status = intel_panel_detect(dev);
4205 if (status == connector_status_unknown)
4206 status = connector_status_connected;
4207 return status;
4208 }
4209
4210 ret = g4x_digital_port_connected(dev, intel_dig_port);
4211 if (ret == -EINVAL)
4212 return connector_status_unknown;
4213 else if (ret == 0)
4214 return connector_status_disconnected;
4215
4216 return intel_dp_detect_dpcd(intel_dp);
4217 }
4218
4219 static struct edid *
4220 intel_dp_get_edid(struct intel_dp *intel_dp)
4221 {
4222 struct intel_connector *intel_connector = intel_dp->attached_connector;
4223
4224 /* use cached edid if we have one */
4225 if (intel_connector->edid) {
4226 /* invalid edid */
4227 if (IS_ERR(intel_connector->edid))
4228 return NULL;
4229
4230 return drm_edid_duplicate(intel_connector->edid);
4231 } else
4232 return drm_get_edid(&intel_connector->base,
4233 &intel_dp->aux.ddc);
4234 }
4235
4236 static void
4237 intel_dp_set_edid(struct intel_dp *intel_dp)
4238 {
4239 struct intel_connector *intel_connector = intel_dp->attached_connector;
4240 struct edid *edid;
4241
4242 edid = intel_dp_get_edid(intel_dp);
4243 intel_connector->detect_edid = edid;
4244
4245 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4246 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4247 else
4248 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4249 }
4250
4251 static void
4252 intel_dp_unset_edid(struct intel_dp *intel_dp)
4253 {
4254 struct intel_connector *intel_connector = intel_dp->attached_connector;
4255
4256 kfree(intel_connector->detect_edid);
4257 intel_connector->detect_edid = NULL;
4258
4259 intel_dp->has_audio = false;
4260 }
4261
4262 static enum intel_display_power_domain
4263 intel_dp_power_get(struct intel_dp *dp)
4264 {
4265 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4266 enum intel_display_power_domain power_domain;
4267
4268 power_domain = intel_display_port_power_domain(encoder);
4269 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4270
4271 return power_domain;
4272 }
4273
4274 static void
4275 intel_dp_power_put(struct intel_dp *dp,
4276 enum intel_display_power_domain power_domain)
4277 {
4278 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4279 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4280 }
4281
4282 static enum drm_connector_status
4283 intel_dp_detect(struct drm_connector *connector, bool force)
4284 {
4285 struct intel_dp *intel_dp = intel_attached_dp(connector);
4286 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4287 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4288 struct drm_device *dev = connector->dev;
4289 enum drm_connector_status status;
4290 enum intel_display_power_domain power_domain;
4291 bool ret;
4292
4293 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4294 connector->base.id, connector->name);
4295 intel_dp_unset_edid(intel_dp);
4296
4297 if (intel_dp->is_mst) {
4298 /* MST devices are disconnected from a monitor POV */
4299 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4300 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4301 return connector_status_disconnected;
4302 }
4303
4304 power_domain = intel_dp_power_get(intel_dp);
4305
4306 /* Can't disconnect eDP, but you can close the lid... */
4307 if (is_edp(intel_dp))
4308 status = edp_detect(intel_dp);
4309 else if (HAS_PCH_SPLIT(dev))
4310 status = ironlake_dp_detect(intel_dp);
4311 else
4312 status = g4x_dp_detect(intel_dp);
4313 if (status != connector_status_connected)
4314 goto out;
4315
4316 intel_dp_probe_oui(intel_dp);
4317
4318 ret = intel_dp_probe_mst(intel_dp);
4319 if (ret) {
4320 /* if we are in MST mode then this connector
4321 * won't appear connected or have anything with an EDID on it */
4322 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4323 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4324 status = connector_status_disconnected;
4325 goto out;
4326 }
4327
4328 intel_dp_set_edid(intel_dp);
4329
4330 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4331 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4332 status = connector_status_connected;
4333
4334 out:
4335 intel_dp_power_put(intel_dp, power_domain);
4336 return status;
4337 }
4338
4339 static void
4340 intel_dp_force(struct drm_connector *connector)
4341 {
4342 struct intel_dp *intel_dp = intel_attached_dp(connector);
4343 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4344 enum intel_display_power_domain power_domain;
4345
4346 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4347 connector->base.id, connector->name);
4348 intel_dp_unset_edid(intel_dp);
4349
4350 if (connector->status != connector_status_connected)
4351 return;
4352
4353 power_domain = intel_dp_power_get(intel_dp);
4354
4355 intel_dp_set_edid(intel_dp);
4356
4357 intel_dp_power_put(intel_dp, power_domain);
4358
4359 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4360 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4361 }
4362
4363 static int intel_dp_get_modes(struct drm_connector *connector)
4364 {
4365 struct intel_connector *intel_connector = to_intel_connector(connector);
4366 struct edid *edid;
4367
4368 edid = intel_connector->detect_edid;
4369 if (edid) {
4370 int ret = intel_connector_update_modes(connector, edid);
4371 if (ret)
4372 return ret;
4373 }
4374
4375 /* if eDP has no EDID, fall back to fixed mode */
4376 if (is_edp(intel_attached_dp(connector)) &&
4377 intel_connector->panel.fixed_mode) {
4378 struct drm_display_mode *mode;
4379
4380 mode = drm_mode_duplicate(connector->dev,
4381 intel_connector->panel.fixed_mode);
4382 if (mode) {
4383 drm_mode_probed_add(connector, mode);
4384 return 1;
4385 }
4386 }
4387
4388 return 0;
4389 }
4390
4391 static bool
4392 intel_dp_detect_audio(struct drm_connector *connector)
4393 {
4394 bool has_audio = false;
4395 struct edid *edid;
4396
4397 edid = to_intel_connector(connector)->detect_edid;
4398 if (edid)
4399 has_audio = drm_detect_monitor_audio(edid);
4400
4401 return has_audio;
4402 }
4403
4404 static int
4405 intel_dp_set_property(struct drm_connector *connector,
4406 struct drm_property *property,
4407 uint64_t val)
4408 {
4409 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4410 struct intel_connector *intel_connector = to_intel_connector(connector);
4411 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4412 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4413 int ret;
4414
4415 ret = drm_object_property_set_value(&connector->base, property, val);
4416 if (ret)
4417 return ret;
4418
4419 if (property == dev_priv->force_audio_property) {
4420 int i = val;
4421 bool has_audio;
4422
4423 if (i == intel_dp->force_audio)
4424 return 0;
4425
4426 intel_dp->force_audio = i;
4427
4428 if (i == HDMI_AUDIO_AUTO)
4429 has_audio = intel_dp_detect_audio(connector);
4430 else
4431 has_audio = (i == HDMI_AUDIO_ON);
4432
4433 if (has_audio == intel_dp->has_audio)
4434 return 0;
4435
4436 intel_dp->has_audio = has_audio;
4437 goto done;
4438 }
4439
4440 if (property == dev_priv->broadcast_rgb_property) {
4441 bool old_auto = intel_dp->color_range_auto;
4442 uint32_t old_range = intel_dp->color_range;
4443
4444 switch (val) {
4445 case INTEL_BROADCAST_RGB_AUTO:
4446 intel_dp->color_range_auto = true;
4447 break;
4448 case INTEL_BROADCAST_RGB_FULL:
4449 intel_dp->color_range_auto = false;
4450 intel_dp->color_range = 0;
4451 break;
4452 case INTEL_BROADCAST_RGB_LIMITED:
4453 intel_dp->color_range_auto = false;
4454 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4455 break;
4456 default:
4457 return -EINVAL;
4458 }
4459
4460 if (old_auto == intel_dp->color_range_auto &&
4461 old_range == intel_dp->color_range)
4462 return 0;
4463
4464 goto done;
4465 }
4466
4467 if (is_edp(intel_dp) &&
4468 property == connector->dev->mode_config.scaling_mode_property) {
4469 if (val == DRM_MODE_SCALE_NONE) {
4470 DRM_DEBUG_KMS("scaling mode 'none' not supported\n");
4471 return -EINVAL;
4472 }
4473
4474 if (intel_connector->panel.fitting_mode == val) {
4475 /* the eDP scaling property is not changed */
4476 return 0;
4477 }
4478 intel_connector->panel.fitting_mode = val;
4479
4480 goto done;
4481 }
4482
4483 return -EINVAL;
4484
4485 done:
4486 if (intel_encoder->base.crtc)
4487 intel_crtc_restore_mode(intel_encoder->base.crtc);
4488
4489 return 0;
4490 }
4491
4492 static void
4493 intel_dp_connector_destroy(struct drm_connector *connector)
4494 {
4495 struct intel_connector *intel_connector = to_intel_connector(connector);
4496
4497 kfree(intel_connector->detect_edid);
4498
4499 if (!IS_ERR_OR_NULL(intel_connector->edid))
4500 kfree(intel_connector->edid);
4501
4502 /* Can't call is_edp() since the encoder may have been destroyed
4503 * already. */
4504 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4505 intel_panel_fini(&intel_connector->panel);
4506
4507 drm_connector_cleanup(connector);
4508 kfree(connector);
4509 }
4510
4511 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4512 {
4513 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4514 struct intel_dp *intel_dp = &intel_dig_port->dp;
4515
4516 drm_dp_aux_unregister(&intel_dp->aux);
4517 intel_dp_mst_encoder_cleanup(intel_dig_port);
4518 if (is_edp(intel_dp)) {
4519 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4520 /*
4521 * vdd might still be enabled due to the delayed vdd off.
4522 * Make sure vdd is actually turned off here.
4523 */
4524 pps_lock(intel_dp);
4525 edp_panel_vdd_off_sync(intel_dp);
4526 pps_unlock(intel_dp);
4527
4528 if (intel_dp->edp_notifier.notifier_call) {
4529 unregister_reboot_notifier(&intel_dp->edp_notifier);
4530 intel_dp->edp_notifier.notifier_call = NULL;
4531 }
4532 }
4533 drm_encoder_cleanup(encoder);
4534 kfree(intel_dig_port);
4535 }
4536
4537 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4538 {
4539 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4540
4541 if (!is_edp(intel_dp))
4542 return;
4543
4544 /*
4545 * vdd might still be enabled due to the delayed vdd off.
4546 * Make sure vdd is actually turned off here.
4547 */
4548 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4549 pps_lock(intel_dp);
4550 edp_panel_vdd_off_sync(intel_dp);
4551 pps_unlock(intel_dp);
4552 }
4553
4554 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4555 {
4556 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4557 struct drm_device *dev = intel_dig_port->base.base.dev;
4558 struct drm_i915_private *dev_priv = dev->dev_private;
4559 enum intel_display_power_domain power_domain;
4560
4561 lockdep_assert_held(&dev_priv->pps_mutex);
4562
4563 if (!edp_have_panel_vdd(intel_dp))
4564 return;
4565
4566 /*
4567 * The VDD bit needs a power domain reference, so if the bit is
4568 * already enabled when we boot or resume, grab this reference and
4569 * schedule a vdd off, so we don't hold on to the reference
4570 * indefinitely.
4571 */
4572 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4573 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4574 intel_display_power_get(dev_priv, power_domain);
4575
4576 edp_panel_vdd_schedule_off(intel_dp);
4577 }
4578
4579 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4580 {
4581 struct intel_dp *intel_dp;
4582
4583 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4584 return;
4585
4586 intel_dp = enc_to_intel_dp(encoder);
4587
4588 pps_lock(intel_dp);
4589
4590 /*
4591 * Read out the current power sequencer assignment,
4592 * in case the BIOS did something with it.
4593 */
4594 if (IS_VALLEYVIEW(encoder->dev))
4595 vlv_initial_power_sequencer_setup(intel_dp);
4596
4597 intel_edp_panel_vdd_sanitize(intel_dp);
4598
4599 pps_unlock(intel_dp);
4600 }
4601
4602 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4603 .dpms = intel_connector_dpms,
4604 .detect = intel_dp_detect,
4605 .force = intel_dp_force,
4606 .fill_modes = drm_helper_probe_single_connector_modes,
4607 .set_property = intel_dp_set_property,
4608 .atomic_get_property = intel_connector_atomic_get_property,
4609 .destroy = intel_dp_connector_destroy,
4610 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4611 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4612 };
4613
4614 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4615 .get_modes = intel_dp_get_modes,
4616 .mode_valid = intel_dp_mode_valid,
4617 .best_encoder = intel_best_encoder,
4618 };
4619
4620 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4621 .reset = intel_dp_encoder_reset,
4622 .destroy = intel_dp_encoder_destroy,
4623 };
4624
4625 void
4626 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4627 {
4628 return;
4629 }
4630
4631 enum irqreturn
4632 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4633 {
4634 struct intel_dp *intel_dp = &intel_dig_port->dp;
4635 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4636 struct drm_device *dev = intel_dig_port->base.base.dev;
4637 struct drm_i915_private *dev_priv = dev->dev_private;
4638 enum intel_display_power_domain power_domain;
4639 enum irqreturn ret = IRQ_NONE;
4640
4641 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4642 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4643
4644 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4645 /*
4646 * vdd off can generate a long pulse on eDP which
4647 * would require vdd on to handle it, and thus we
4648 * would end up in an endless cycle of
4649 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4650 */
4651 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4652 port_name(intel_dig_port->port));
4653 return IRQ_HANDLED;
4654 }
4655
4656 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4657 port_name(intel_dig_port->port),
4658 long_hpd ? "long" : "short");
4659
4660 power_domain = intel_display_port_power_domain(intel_encoder);
4661 intel_display_power_get(dev_priv, power_domain);
4662
4663 if (long_hpd) {
4664
4665 if (HAS_PCH_SPLIT(dev)) {
4666 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4667 goto mst_fail;
4668 } else {
4669 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4670 goto mst_fail;
4671 }
4672
4673 if (!intel_dp_get_dpcd(intel_dp)) {
4674 goto mst_fail;
4675 }
4676
4677 intel_dp_probe_oui(intel_dp);
4678
4679 if (!intel_dp_probe_mst(intel_dp))
4680 goto mst_fail;
4681
4682 } else {
4683 if (intel_dp->is_mst) {
4684 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4685 goto mst_fail;
4686 }
4687
4688 if (!intel_dp->is_mst) {
4689 /*
4690 * we'll check the link status via the normal hot plug path later -
4691 * but for short hpds we should check it now
4692 */
4693 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4694 intel_dp_check_link_status(intel_dp);
4695 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4696 }
4697 }
4698
4699 ret = IRQ_HANDLED;
4700
4701 goto put_power;
4702 mst_fail:
4703 /* if we were in MST mode and the device is not there, get out of MST mode */
4704 if (intel_dp->is_mst) {
4705 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4706 intel_dp->is_mst = false;
4707 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4708 }
4709 put_power:
4710 intel_display_power_put(dev_priv, power_domain);
4711
4712 return ret;
4713 }
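
/*
 * Summary sketch of the HPD policy above (illustrative, mirrors the code
 * rather than adding behaviour):
 *
 *	eDP:	long pulses are ignored, to avoid the endless
 *		"vdd off -> long hpd -> vdd on -> detect -> vdd off" cycle;
 *	long:	port still connected? -> re-read DPCD, re-probe OUI and
 *		MST; any failure drops the port out of MST mode;
 *	short:	MST -> service the ESI/IRQ vector,
 *		SST -> check link status and retrain if needed.
 */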
4714
4715 /* Return which DP Port should be selected for Transcoder DP control */
4716 int
4717 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4718 {
4719 struct drm_device *dev = crtc->dev;
4720 struct intel_encoder *intel_encoder;
4721 struct intel_dp *intel_dp;
4722
4723 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4724 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4725
4726 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4727 intel_encoder->type == INTEL_OUTPUT_EDP)
4728 return intel_dp->output_reg;
4729 }
4730
4731 return -1;
4732 }
4733
4734 /* check the VBT to see whether the eDP is on one of the DP ports */
4735 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4736 {
4737 struct drm_i915_private *dev_priv = dev->dev_private;
4738 union child_device_config *p_child;
4739 int i;
4740 static const short port_mapping[] = {
4741 [PORT_B] = PORT_IDPB,
4742 [PORT_C] = PORT_IDPC,
4743 [PORT_D] = PORT_IDPD,
4744 };
4745
4746 if (port == PORT_A)
4747 return true;
4748
4749 if (!dev_priv->vbt.child_dev_num)
4750 return false;
4751
4752 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4753 p_child = dev_priv->vbt.child_dev + i;
4754
4755 if (p_child->common.dvo_port == port_mapping[port] &&
4756 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4757 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4758 return true;
4759 }
4760 return false;
4761 }
4762
4763 void
4764 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4765 {
4766 struct intel_connector *intel_connector = to_intel_connector(connector);
4767
4768 intel_attach_force_audio_property(connector);
4769 intel_attach_broadcast_rgb_property(connector);
4770 intel_dp->color_range_auto = true;
4771
4772 if (is_edp(intel_dp)) {
4773 drm_mode_create_scaling_mode_property(connector->dev);
4774 drm_object_attach_property(
4775 &connector->base,
4776 connector->dev->mode_config.scaling_mode_property,
4777 DRM_MODE_SCALE_ASPECT);
4778 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4779 }
4780 }
4781
4782 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4783 {
4784 intel_dp->last_power_cycle = jiffies;
4785 intel_dp->last_power_on = jiffies;
4786 intel_dp->last_backlight_off = jiffies;
4787 }
4788
4789 static void
4790 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4791 struct intel_dp *intel_dp)
4792 {
4793 struct drm_i915_private *dev_priv = dev->dev_private;
4794 struct edp_power_seq cur, vbt, spec,
4795 *final = &intel_dp->pps_delays;
4796 u32 pp_on, pp_off, pp_div, pp;
4797 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4798
4799 lockdep_assert_held(&dev_priv->pps_mutex);
4800
4801 /* already initialized? */
4802 if (final->t11_t12 != 0)
4803 return;
4804
4805 if (HAS_PCH_SPLIT(dev)) {
4806 pp_ctrl_reg = PCH_PP_CONTROL;
4807 pp_on_reg = PCH_PP_ON_DELAYS;
4808 pp_off_reg = PCH_PP_OFF_DELAYS;
4809 pp_div_reg = PCH_PP_DIVISOR;
4810 } else {
4811 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4812
4813 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4814 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4815 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4816 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4817 }
4818
4819 /* Workaround: Need to write PP_CONTROL with the unlock key as
4820 * the very first thing. */
4821 pp = ironlake_get_pp_control(intel_dp);
4822 I915_WRITE(pp_ctrl_reg, pp);
4823
4824 pp_on = I915_READ(pp_on_reg);
4825 pp_off = I915_READ(pp_off_reg);
4826 pp_div = I915_READ(pp_div_reg);
4827
4828 /* Pull timing values out of registers */
4829 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4830 PANEL_POWER_UP_DELAY_SHIFT;
4831
4832 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4833 PANEL_LIGHT_ON_DELAY_SHIFT;
4834
4835 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4836 PANEL_LIGHT_OFF_DELAY_SHIFT;
4837
4838 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4839 PANEL_POWER_DOWN_DELAY_SHIFT;
4840
4841 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4842 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4843
4844 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4845 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4846
4847 vbt = dev_priv->vbt.edp_pps;
4848
4849 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4850 * our hw here, which are all in 100usec. */
4851 spec.t1_t3 = 210 * 10;
4852 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4853 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4854 spec.t10 = 500 * 10;
4855 /* This one is special and actually in units of 100ms, but zero
4856 * based in the hw (so we need to add 100 ms). But the sw vbt
4857 * table multiplies it by 1000 to make it in units of 100usec,
4858 * too. */
4859 spec.t11_t12 = (510 + 100) * 10;
4860
4861 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4862 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4863
4864 /* Use the max of the register settings and vbt. If both are
4865 * unset, fall back to the spec limits. */
4866 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4867 spec.field : \
4868 max(cur.field, vbt.field))
4869 assign_final(t1_t3);
4870 assign_final(t8);
4871 assign_final(t9);
4872 assign_final(t10);
4873 assign_final(t11_t12);
4874 #undef assign_final
4875
4876 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4877 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4878 intel_dp->backlight_on_delay = get_delay(t8);
4879 intel_dp->backlight_off_delay = get_delay(t9);
4880 intel_dp->panel_power_down_delay = get_delay(t10);
4881 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4882 #undef get_delay
4883
4884 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4885 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4886 intel_dp->panel_power_cycle_delay);
4887
4888 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4889 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4890 }
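
/*
 * Worked example for the unit handling above, using hypothetical values:
 * the hardware fields count in units of 100 usec, so a t1_t3 of 2100
 * means 210 ms, and get_delay() converts it to whole milliseconds for
 * the software delays:
 *
 *	final->t1_t3 = 2100;				// 2100 * 100 usec = 210 ms
 *	panel_power_up_delay = DIV_ROUND_UP(2100, 10);	// = 210 ms
 */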
4891
4892 static void
4893 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4894 struct intel_dp *intel_dp)
4895 {
4896 struct drm_i915_private *dev_priv = dev->dev_private;
4897 u32 pp_on, pp_off, pp_div, port_sel = 0;
4898 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4899 int pp_on_reg, pp_off_reg, pp_div_reg;
4900 enum port port = dp_to_dig_port(intel_dp)->port;
4901 const struct edp_power_seq *seq = &intel_dp->pps_delays;
4902
4903 lockdep_assert_held(&dev_priv->pps_mutex);
4904
4905 if (HAS_PCH_SPLIT(dev)) {
4906 pp_on_reg = PCH_PP_ON_DELAYS;
4907 pp_off_reg = PCH_PP_OFF_DELAYS;
4908 pp_div_reg = PCH_PP_DIVISOR;
4909 } else {
4910 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4911
4912 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4913 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4914 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4915 }
4916
4917 /*
4918 * And finally store the new values in the power sequencer. The
4919 * backlight delays are set to 1 because we do manual waits on them. For
4920 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4921 * we'll end up waiting for the backlight off delay twice: once when we
4922 * do the manual sleep, and once when we disable the panel and wait for
4923 * the PP_STATUS bit to become zero.
4924 */
4925 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4926 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4927 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4928 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4929 /* Compute the divisor for the pp clock, simply match the Bspec
4930 * formula. */
4931 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4932 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4933 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4934
4935 /* Haswell doesn't have any port selection bits for the panel
4936 * power sequencer any more. */
4937 if (IS_VALLEYVIEW(dev)) {
4938 port_sel = PANEL_PORT_SELECT_VLV(port);
4939 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4940 if (port == PORT_A)
4941 port_sel = PANEL_PORT_SELECT_DPA;
4942 else
4943 port_sel = PANEL_PORT_SELECT_DPD;
4944 }
4945
4946 pp_on |= port_sel;
4947
4948 I915_WRITE(pp_on_reg, pp_on);
4949 I915_WRITE(pp_off_reg, pp_off);
4950 I915_WRITE(pp_div_reg, pp_div);
4951
4952 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4953 I915_READ(pp_on_reg),
4954 I915_READ(pp_off_reg),
4955 I915_READ(pp_div_reg));
4956 }
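
/*
 * Worked example for the PANEL_POWER_CYCLE_DELAY packing above, with a
 * hypothetical t11_t12: the register field counts in units of 100 ms
 * while seq->t11_t12 is kept in units of 100 usec, so the conversion
 * divides by 1000 and rounds up:
 *
 *	seq->t11_t12 = 6100;		// 6100 * 100 usec = 610 ms
 *	DIV_ROUND_UP(6100, 1000);	// = 7, programmed as 700 ms
 */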
4957
4958 /**
4959 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4960 * @dev: DRM device
4961 * @refresh_rate: RR to be programmed
4962 *
4963 * This function gets called when refresh rate (RR) has to be changed from
4964 * one frequency to another. Switches can be between high and low RR
4965 * supported by the panel or to any other RR based on media playback (in
4966 * this case, RR value needs to be passed from user space).
4967 *
4968 * The caller of this function needs to hold dev_priv->drrs.mutex.
4969 */
4970 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4971 {
4972 struct drm_i915_private *dev_priv = dev->dev_private;
4973 struct intel_encoder *encoder;
4974 struct intel_digital_port *dig_port = NULL;
4975 struct intel_dp *intel_dp = dev_priv->drrs.dp;
4976 struct intel_crtc_state *config = NULL;
4977 struct intel_crtc *intel_crtc = NULL;
4978 u32 reg, val;
4979 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4980
4981 if (refresh_rate <= 0) {
4982 DRM_DEBUG_KMS("Refresh rate should be positive.\n");
4983 return;
4984 }
4985
4986 if (intel_dp == NULL) {
4987 DRM_DEBUG_KMS("DRRS not supported.\n");
4988 return;
4989 }
4990
4991 /*
4992 * FIXME: This needs proper synchronization with psr state for some
4993 * platforms that cannot have PSR and DRRS enabled at the same time.
4994 */
4995
4996 dig_port = dp_to_dig_port(intel_dp);
4997 encoder = &dig_port->base;
4998 intel_crtc = to_intel_crtc(encoder->base.crtc);
4999
5000 if (!intel_crtc) {
5001 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5002 return;
5003 }
5004
5005 config = intel_crtc->config;
5006
5007 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5008 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5009 return;
5010 }
5011
5012 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5013 refresh_rate)
5014 index = DRRS_LOW_RR;
5015
5016 if (index == dev_priv->drrs.refresh_rate_type) {
5017 DRM_DEBUG_KMS(
5018 "DRRS requested for previously set RR...ignoring\n");
5019 return;
5020 }
5021
5022 if (!intel_crtc->active) {
5023 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
5024 return;
5025 }
5026
5027 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5028 switch (index) {
5029 case DRRS_HIGH_RR:
5030 intel_dp_set_m_n(intel_crtc, M1_N1);
5031 break;
5032 case DRRS_LOW_RR:
5033 intel_dp_set_m_n(intel_crtc, M2_N2);
5034 break;
5035 case DRRS_MAX_RR:
5036 default:
5037 DRM_ERROR("Unsupported refresh rate type\n");
5038 }
5039 } else if (INTEL_INFO(dev)->gen > 6) {
5040 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5041 val = I915_READ(reg);
5042
5043 if (index > DRRS_HIGH_RR) {
5044 if (IS_VALLEYVIEW(dev))
5045 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5046 else
5047 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5048 } else {
5049 if (IS_VALLEYVIEW(dev))
5050 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5051 else
5052 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5053 }
5054 I915_WRITE(reg, val);
5055 }
5056
5057 dev_priv->drrs.refresh_rate_type = index;
5058
5059 DRM_DEBUG_KMS("eDP Refresh Rate set to: %d Hz\n", refresh_rate);
5060 }
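
/*
 * Illustrative note on the two switch mechanisms above: gen8+ (except
 * CHV) flips the pipe between the two precomputed link M/N values,
 * while gen7 toggles a PIPECONF bit. A hypothetical low-RR switch on a
 * non-VLV gen7 part reduces to:
 *
 *	val = I915_READ(PIPECONF(cpu_transcoder));
 *	I915_WRITE(PIPECONF(cpu_transcoder),
 *		   val | PIPECONF_EDP_RR_MODE_SWITCH);
 */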
5061
5062 /**
5063 * intel_edp_drrs_enable - init drrs struct if supported
5064 * @intel_dp: DP struct
5065 *
5066 * Initializes frontbuffer_bits and drrs.dp
5067 */
5068 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5069 {
5070 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5071 struct drm_i915_private *dev_priv = dev->dev_private;
5072 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5073 struct drm_crtc *crtc = dig_port->base.base.crtc;
5074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5075
5076 if (!intel_crtc->config->has_drrs) {
5077 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5078 return;
5079 }
5080
5081 mutex_lock(&dev_priv->drrs.mutex);
5082 if (WARN_ON(dev_priv->drrs.dp)) {
5083 DRM_ERROR("DRRS already enabled\n");
5084 goto unlock;
5085 }
5086
5087 dev_priv->drrs.busy_frontbuffer_bits = 0;
5088
5089 dev_priv->drrs.dp = intel_dp;
5090
5091 unlock:
5092 mutex_unlock(&dev_priv->drrs.mutex);
5093 }
5094
5095 /**
5096 * intel_edp_drrs_disable - Disable DRRS
5097 * @intel_dp: DP struct
5098 *
5099 */
5100 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5101 {
5102 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5103 struct drm_i915_private *dev_priv = dev->dev_private;
5104 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5105 struct drm_crtc *crtc = dig_port->base.base.crtc;
5106 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5107
5108 if (!intel_crtc->config->has_drrs)
5109 return;
5110
5111 mutex_lock(&dev_priv->drrs.mutex);
5112 if (!dev_priv->drrs.dp) {
5113 mutex_unlock(&dev_priv->drrs.mutex);
5114 return;
5115 }
5116
5117 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5118 intel_dp_set_drrs_state(dev_priv->dev,
5119 intel_dp->attached_connector->panel.
5120 fixed_mode->vrefresh);
5121
5122 dev_priv->drrs.dp = NULL;
5123 mutex_unlock(&dev_priv->drrs.mutex);
5124
5125 cancel_delayed_work_sync(&dev_priv->drrs.work);
5126 }
5127
5128 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5129 {
5130 struct drm_i915_private *dev_priv =
5131 container_of(work, typeof(*dev_priv), drrs.work.work);
5132 struct intel_dp *intel_dp;
5133
5134 mutex_lock(&dev_priv->drrs.mutex);
5135
5136 intel_dp = dev_priv->drrs.dp;
5137
5138 if (!intel_dp)
5139 goto unlock;
5140
5141 /*
5142 * The delayed work can race with an invalidate, hence we need to
5143 * recheck.
5144 */
5145
5146 if (dev_priv->drrs.busy_frontbuffer_bits)
5147 goto unlock;
5148
5149 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5150 intel_dp_set_drrs_state(dev_priv->dev,
5151 intel_dp->attached_connector->panel.
5152 downclock_mode->vrefresh);
5153
5154 unlock:
5155 mutex_unlock(&dev_priv->drrs.mutex);
5156 }
5157
5158 /**
5159 * intel_edp_drrs_invalidate - Invalidate DRRS
5160 * @dev: DRM device
5161 * @frontbuffer_bits: frontbuffer plane tracking bits
5162 *
5163 * When there is a disturbance on screen (due to cursor movement/time
5164 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5165 * high RR.
5166 *
5167 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5168 */
5169 void intel_edp_drrs_invalidate(struct drm_device *dev,
5170 unsigned frontbuffer_bits)
5171 {
5172 struct drm_i915_private *dev_priv = dev->dev_private;
5173 struct drm_crtc *crtc;
5174 enum pipe pipe;
5175
5176 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5177 return;
5178
5179 cancel_delayed_work(&dev_priv->drrs.work);
5180
5181 mutex_lock(&dev_priv->drrs.mutex);
5182 if (!dev_priv->drrs.dp) {
5183 mutex_unlock(&dev_priv->drrs.mutex);
5184 return;
5185 }
5186
5187 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5188 pipe = to_intel_crtc(crtc)->pipe;
5189
5190 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5191 intel_dp_set_drrs_state(dev_priv->dev,
5192 dev_priv->drrs.dp->attached_connector->panel.
5193 fixed_mode->vrefresh);
5194 }
5195
5196 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5197
5198 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5199 mutex_unlock(&dev_priv->drrs.mutex);
5200 }
5201
5202 /**
5203 * intel_edp_drrs_flush - Flush DRRS
5204 * @dev: DRM device
5205 * @frontbuffer_bits: frontbuffer plane tracking bits
5206 *
5207 * When there is no movement on screen, DRRS work can be scheduled.
5208 * This DRRS work is responsible for setting relevant registers after a
5209 * timeout of 1 second.
5210 *
5211 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5212 */
5213 void intel_edp_drrs_flush(struct drm_device *dev,
5214 unsigned frontbuffer_bits)
5215 {
5216 struct drm_i915_private *dev_priv = dev->dev_private;
5217 struct drm_crtc *crtc;
5218 enum pipe pipe;
5219
5220 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5221 return;
5222
5223 cancel_delayed_work(&dev_priv->drrs.work);
5224
5225 mutex_lock(&dev_priv->drrs.mutex);
5226 if (!dev_priv->drrs.dp) {
5227 mutex_unlock(&dev_priv->drrs.mutex);
5228 return;
5229 }
5230
5231 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5232 pipe = to_intel_crtc(crtc)->pipe;
5233 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5234
5235 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5236 !dev_priv->drrs.busy_frontbuffer_bits)
5237 schedule_delayed_work(&dev_priv->drrs.work,
5238 msecs_to_jiffies(1000));
5239 mutex_unlock(&dev_priv->drrs.mutex);
5240 }
5241
5242 /**
5243 * DOC: Display Refresh Rate Switching (DRRS)
5244 *
5245 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5246 * which enables switching between low and high refresh rates,
5247 * dynamically, based on the usage scenario. This feature is applicable
5248 * for internal panels.
5249 *
5250 * Indication that the panel supports DRRS is given by the panel EDID, which
5251 * would list multiple refresh rates for one resolution.
5252 *
5253 * DRRS is of 2 types - static and seamless.
5254 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5255 * (may appear as a blink on screen) and is used in a dock-undock scenario.
5256 * Seamless DRRS involves changing RR without any visual effect to the user
5257 * and can be used during normal system usage. This is done by programming
5258 * certain registers.
5259 *
5260 * Support for static/seamless DRRS may be indicated in the VBT based on
5261 * inputs from the panel spec.
5262 *
5263 * DRRS saves power by switching to low RR based on usage scenarios.
5264 *
5265 * eDP DRRS:
5266 * The implementation is based on frontbuffer tracking implementation.
5267 * When there is a disturbance on the screen triggered by user activity or a
5268 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5269 * When there is no movement on screen, after a timeout of 1 second, a switch
5270 * to low RR is made.
5271 * For integration with frontbuffer tracking code,
5272 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5273 *
5274 * DRRS can be further extended to support other internal panels and also
5275 * the scenario of video playback wherein RR is set based on the rate
5276 * requested by userspace.
5277 */
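
/*
 * Hypothetical call pattern from the frontbuffer tracking side (the
 * comments are illustrative; only the two entry points come from this
 * file):
 *
 *	// userspace dirtied the frontbuffer: switch back to high RR now
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *
 *	// rendering flushed: after 1 s of idleness the delayed work
 *	// drops the panel to its downclocked mode
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */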
5278
5279 /**
5280 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5281 * @intel_connector: eDP connector
5282 * @fixed_mode: preferred mode of panel
5283 *
5284 * This function is called only once at driver load to initialize basic
5285 * DRRS stuff.
5286 *
5287 * Returns:
5288 * Downclock mode if panel supports it, else return NULL.
5289 * DRRS support is determined by the presence of downclock mode (apart
5290 * from VBT setting).
5291 */
5292 static struct drm_display_mode *
5293 intel_dp_drrs_init(struct intel_connector *intel_connector,
5294 struct drm_display_mode *fixed_mode)
5295 {
5296 struct drm_connector *connector = &intel_connector->base;
5297 struct drm_device *dev = connector->dev;
5298 struct drm_i915_private *dev_priv = dev->dev_private;
5299 struct drm_display_mode *downclock_mode = NULL;
5300
5301 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5302 mutex_init(&dev_priv->drrs.mutex);
5303
5304 if (INTEL_INFO(dev)->gen <= 6) {
5305 DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
5306 return NULL;
5307 }
5308
5309 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5310 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5311 return NULL;
5312 }
5313
5314 downclock_mode = intel_find_panel_downclock
5315 (dev, fixed_mode, connector);
5316
5317 if (!downclock_mode) {
5318 DRM_DEBUG_KMS("No downclock mode found. DRRS not supported\n");
5319 return NULL;
5320 }
5321
5322 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5323
5324 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5325 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5326 return downclock_mode;
5327 }
5328
5329 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5330 struct intel_connector *intel_connector)
5331 {
5332 struct drm_connector *connector = &intel_connector->base;
5333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5334 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5335 struct drm_device *dev = intel_encoder->base.dev;
5336 struct drm_i915_private *dev_priv = dev->dev_private;
5337 struct drm_display_mode *fixed_mode = NULL;
5338 struct drm_display_mode *downclock_mode = NULL;
5339 bool has_dpcd;
5340 struct drm_display_mode *scan;
5341 struct edid *edid;
5342 enum pipe pipe = INVALID_PIPE;
5343
5344 if (!is_edp(intel_dp))
5345 return true;
5346
5347 pps_lock(intel_dp);
5348 intel_edp_panel_vdd_sanitize(intel_dp);
5349 pps_unlock(intel_dp);
5350
5351 /* Cache DPCD and EDID for edp. */
5352 has_dpcd = intel_dp_get_dpcd(intel_dp);
5353
5354 if (has_dpcd) {
5355 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5356 dev_priv->no_aux_handshake =
5357 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5358 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5359 } else {
5360 /* if this fails, presume the device is a ghost */
5361 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5362 return false;
5363 }
5364
5365 /* We now know it's not a ghost, init power sequence regs. */
5366 pps_lock(intel_dp);
5367 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5368 pps_unlock(intel_dp);
5369
5370 mutex_lock(&dev->mode_config.mutex);
5371 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5372 if (edid) {
5373 if (drm_add_edid_modes(connector, edid)) {
5374 drm_mode_connector_update_edid_property(connector,
5375 edid);
5376 drm_edid_to_eld(connector, edid);
5377 } else {
5378 kfree(edid);
5379 edid = ERR_PTR(-EINVAL);
5380 }
5381 } else {
5382 edid = ERR_PTR(-ENOENT);
5383 }
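	/* Keep the ERR_PTR too, so a failed EDID read is cached, not retried. */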
5384 intel_connector->edid = edid;
5385
5386 /* prefer fixed mode from EDID if available */
5387 list_for_each_entry(scan, &connector->probed_modes, head) {
5388 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5389 fixed_mode = drm_mode_duplicate(dev, scan);
5390 downclock_mode = intel_dp_drrs_init(
5391 intel_connector, fixed_mode);
5392 break;
5393 }
5394 }
5395
5396 /* fallback to VBT if available for eDP */
5397 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5398 fixed_mode = drm_mode_duplicate(dev,
5399 dev_priv->vbt.lfp_lvds_vbt_mode);
5400 if (fixed_mode)
5401 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5402 }
5403 mutex_unlock(&dev->mode_config.mutex);
5404
5405 if (IS_VALLEYVIEW(dev)) {
5406 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5407 register_reboot_notifier(&intel_dp->edp_notifier);
5408
5409 /*
5410 * Figure out the current pipe for the initial backlight setup.
5411 * If the current pipe isn't valid, try the PPS pipe, and if that
5412 * fails just assume pipe A.
5413 */
5414 if (IS_CHERRYVIEW(dev))
5415 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5416 else
5417 pipe = PORT_TO_PIPE(intel_dp->DP);
5418
5419 if (pipe != PIPE_A && pipe != PIPE_B)
5420 pipe = intel_dp->pps_pipe;
5421
5422 if (pipe != PIPE_A && pipe != PIPE_B)
5423 pipe = PIPE_A;
5424
5425 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5426 pipe_name(pipe));
5427 }
5428
5429 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5430 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5431 intel_panel_setup_backlight(connector, pipe);
5432
5433 return true;
5434 }
5435
5436 bool
5437 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5438 struct intel_connector *intel_connector)
5439 {
5440 struct drm_connector *connector = &intel_connector->base;
5441 struct intel_dp *intel_dp = &intel_dig_port->dp;
5442 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5443 struct drm_device *dev = intel_encoder->base.dev;
5444 struct drm_i915_private *dev_priv = dev->dev_private;
5445 enum port port = intel_dig_port->port;
5446 int type;
5447
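	/* No PPS pipe claimed yet; VLV/CHV assign one when the panel is first used. */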
5448 intel_dp->pps_pipe = INVALID_PIPE;
5449
5450 /* intel_dp vfuncs */
5451 if (INTEL_INFO(dev)->gen >= 9)
5452 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5453 else if (IS_VALLEYVIEW(dev))
5454 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5455 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5456 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5457 else if (HAS_PCH_SPLIT(dev))
5458 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5459 else
5460 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5461
5462 if (INTEL_INFO(dev)->gen >= 9)
5463 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5464 else
5465 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5466
5467 /* Preserve the current hw state. */
5468 intel_dp->DP = I915_READ(intel_dp->output_reg);
5469 intel_dp->attached_connector = intel_connector;
5470
5471 if (intel_dp_is_edp(dev, port))
5472 type = DRM_MODE_CONNECTOR_eDP;
5473 else
5474 type = DRM_MODE_CONNECTOR_DisplayPort;
5475
5476 /*
5477 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5478 * for DP the encoder type can be set by the caller to
5479 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5480 */
5481 if (type == DRM_MODE_CONNECTOR_eDP)
5482 intel_encoder->type = INTEL_OUTPUT_EDP;
5483
5484 /* eDP only on port B and/or C on vlv/chv */
5485 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5486 port != PORT_B && port != PORT_C))
5487 return false;
5488
5489 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5490 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5491 port_name(port));
5492
5493 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5494 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5495
5496 connector->interlace_allowed = true;
5497 	connector->doublescan_allowed = false;
5498
5499 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5500 edp_panel_vdd_work);
5501
5502 intel_connector_attach_encoder(intel_connector, intel_encoder);
5503 drm_connector_register(connector);
5504
5505 if (HAS_DDI(dev))
5506 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5507 else
5508 intel_connector->get_hw_state = intel_connector_get_hw_state;
5509 intel_connector->unregister = intel_dp_connector_unregister;
5510
5511 /* Set up the hotplug pin. */
5512 switch (port) {
5513 case PORT_A:
5514 intel_encoder->hpd_pin = HPD_PORT_A;
5515 break;
5516 case PORT_B:
5517 intel_encoder->hpd_pin = HPD_PORT_B;
5518 break;
5519 case PORT_C:
5520 intel_encoder->hpd_pin = HPD_PORT_C;
5521 break;
5522 case PORT_D:
5523 intel_encoder->hpd_pin = HPD_PORT_D;
5524 break;
5525 default:
5526 BUG();
5527 }
5528
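	/*
	 * For eDP, initialize the panel power-cycle timestamps and pick up the
	 * power sequencer state (tracked per pipe on VLV/CHV) before first use.
	 */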
5529 if (is_edp(intel_dp)) {
5530 pps_lock(intel_dp);
5531 intel_dp_init_panel_power_timestamps(intel_dp);
5532 if (IS_VALLEYVIEW(dev))
5533 vlv_initial_power_sequencer_setup(intel_dp);
5534 else
5535 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5536 pps_unlock(intel_dp);
5537 }
5538
5539 intel_dp_aux_init(intel_dp, intel_connector);
5540
5541 /* init MST on ports that can support it */
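	/* (port A is the eDP port on these platforms and has no MST support) */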
5542 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5543 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5544 intel_dp_mst_encoder_init(intel_dig_port,
5545 intel_connector->base.base.id);
5546 }
5547 }
5548
5549 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5550 drm_dp_aux_unregister(&intel_dp->aux);
5551 if (is_edp(intel_dp)) {
5552 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5553 /*
5554 			 * vdd might still be enabled due to the delayed vdd off.
5555 * Make sure vdd is actually turned off here.
5556 */
5557 pps_lock(intel_dp);
5558 edp_panel_vdd_off_sync(intel_dp);
5559 pps_unlock(intel_dp);
5560 }
5561 drm_connector_unregister(connector);
5562 drm_connector_cleanup(connector);
5563 return false;
5564 }
5565
5566 intel_dp_add_properties(intel_dp, connector);
5567
5568 	/* For G4X desktop chips, PEG_BAND_GAP_DATA bits 3:0 must first be
5569 	 * written as 0xd. Failing to do so results in spurious interrupts
5570 	 * on the port when no cable is attached.
5571 	 */
5572 if (IS_G4X(dev) && !IS_GM45(dev)) {
5573 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5574 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5575 }
5576
5577 return true;
5578 }
5579
5580 void
5581 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5582 {
5583 struct drm_i915_private *dev_priv = dev->dev_private;
5584 struct intel_digital_port *intel_dig_port;
5585 struct intel_encoder *intel_encoder;
5586 struct drm_encoder *encoder;
5587 struct intel_connector *intel_connector;
5588
5589 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5590 if (!intel_dig_port)
5591 return;
5592
5593 intel_connector = intel_connector_alloc();
5594 if (!intel_connector) {
5595 kfree(intel_dig_port);
5596 return;
5597 }
5598
5599 intel_encoder = &intel_dig_port->base;
5600 encoder = &intel_encoder->base;
5601
5602 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5603 DRM_MODE_ENCODER_TMDS);
5604
5605 intel_encoder->compute_config = intel_dp_compute_config;
5606 intel_encoder->disable = intel_disable_dp;
5607 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5608 intel_encoder->get_config = intel_dp_get_config;
5609 intel_encoder->suspend = intel_dp_encoder_suspend;
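	/*
	 * CHV and VLV need PHY sideband (DPIO) programming in their
	 * pre-enable hooks; all other platforms use the g4x paths.
	 */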
5610 if (IS_CHERRYVIEW(dev)) {
5611 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5612 intel_encoder->pre_enable = chv_pre_enable_dp;
5613 intel_encoder->enable = vlv_enable_dp;
5614 intel_encoder->post_disable = chv_post_disable_dp;
5615 } else if (IS_VALLEYVIEW(dev)) {
5616 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5617 intel_encoder->pre_enable = vlv_pre_enable_dp;
5618 intel_encoder->enable = vlv_enable_dp;
5619 intel_encoder->post_disable = vlv_post_disable_dp;
5620 } else {
5621 intel_encoder->pre_enable = g4x_pre_enable_dp;
5622 intel_encoder->enable = g4x_enable_dp;
5623 if (INTEL_INFO(dev)->gen >= 5)
5624 intel_encoder->post_disable = ilk_post_disable_dp;
5625 }
5626
5627 intel_dig_port->port = port;
5628 intel_dig_port->dp.output_reg = output_reg;
5629
5630 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
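	/* On CHV, port D is wired to pipe C only; ports B/C can use pipes A/B. */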
5631 if (IS_CHERRYVIEW(dev)) {
5632 if (port == PORT_D)
5633 intel_encoder->crtc_mask = 1 << 2;
5634 else
5635 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5636 } else {
5637 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5638 }
5639 intel_encoder->cloneable = 0;
5640 intel_encoder->hot_plug = intel_dp_hot_plug;
5641
5642 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5643 dev_priv->hpd_irq_port[port] = intel_dig_port;
5644
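	/*
	 * On failure, intel_dp_init_connector() has already unregistered and
	 * cleaned up the connector; only the encoder and allocations remain.
	 */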
5645 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5646 drm_encoder_cleanup(encoder);
5647 kfree(intel_dig_port);
5648 kfree(intel_connector);
5649 }
5650 }
5651
5652 void intel_dp_mst_suspend(struct drm_device *dev)
5653 {
5654 struct drm_i915_private *dev_priv = dev->dev_private;
5655 int i;
5656
5657 	/* Suspend the MST topology manager on every MST-active port. */
5658 for (i = 0; i < I915_MAX_PORTS; i++) {
5659 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5660 if (!intel_dig_port)
5661 continue;
5662
5663 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5664 if (!intel_dig_port->dp.can_mst)
5665 continue;
5666 if (intel_dig_port->dp.is_mst)
5667 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5668 }
5669 }
5670 }
5671
5672 void intel_dp_mst_resume(struct drm_device *dev)
5673 {
5674 struct drm_i915_private *dev_priv = dev->dev_private;
5675 int i;
5676
5677 for (i = 0; i < I915_MAX_PORTS; i++) {
5678 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5679 if (!intel_dig_port)
5680 continue;
5681 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5682 int ret;
5683
5684 if (!intel_dig_port->dp.can_mst)
5685 continue;
5686
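			/*
			 * If the topology failed to resume, let
			 * intel_dp_check_mst_status() re-examine the sink
			 * and tear down MST if it is gone.
			 */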
5687 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5688 			if (ret != 0)
5689 				intel_dp_check_mst_status(&intel_dig_port->dp);
5691 }
5692 }
5693 }