drm/i915: Remove the drrs capability enum initialization
drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we provide only the fixed rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider to be programmed for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
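/*
 * Sanity check of the fixed point encoding above (illustrative note, not
 * part of the original source): for DP_LINK_BW_1_62,
 * (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a, matching the
 * .m2 value in the table; 1677722 / 2^22 is ~0.4, i.e. m2 ~= 32.4.
 */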
/* Skylake supports the following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
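/*
 * Continuing the worked example above (added for clarity):
 * intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
 * decakilobits, which fits within the 216000 decakilobit capacity of a
 * single 2.7GHz lane computed above.
 */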

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
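/*
 * Illustration (added for clarity, not part of the original source): the
 * AUX data registers are big-endian with respect to the message buffer, so
 * intel_dp_pack_aux() maps src = {0x11, 0x22, 0x33} to 0x11223300, and
 * intel_dp_unpack_aux() reverses the mapping for the received bytes.
 */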

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be
   tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and we'd like the AUX
	 * clock to run at 2MHz. So take the hrawclk value and divide by 2.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400MHz */
		else
			return 225; /* eDP input clock at 450MHz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug in to the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
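/*
 * Worked example of the header packing above (illustrative note, not part
 * of the original source): a native read of one byte from DPCD address
 * 0x000 (msg->request = DP_AUX_NATIVE_READ = 0x9, msg->size = 1) yields
 * txbuf = { 0x90, 0x00, 0x00, 0x00 }: the request code in the high nibble
 * of byte 0, the 20-bit address split across bytes 0-2, and size - 1 in
 * byte 3.
 */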

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in a CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
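/*
 * Illustration of the "(link bw >> 3) + 1" trick used above (added for
 * clarity, not part of the original source): the DP link bw codes are
 * 0x06/0x0a/0x14 for 1.62/2.7/5.4 GHz, so shifting right by 3 gives
 * 0/1/2, and adding 1 yields how many leading entries of default_rates[]
 * (162000, 270000, 540000) are usable.
 */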

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}
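/*
 * Note (added for clarity): with a zero-initialized rates[] array,
 * rate_to_index(0, rates) returns the index of the first unused slot,
 * i.e. the number of valid entries. intel_dp_max_link_rate() below
 * exploits this to index the last (highest) common rate, since the
 * common_rates arrays are sorted in ascending order.
 */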

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

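/*
 * Note (added for clarity): each MASK/VALUE pair above is consumed by
 * wait_panel_status() below, which polls the panel power status register
 * until (status & mask) == value, i.e. until the power sequencer reaches
 * the named idle state.
 */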
1591 static void wait_panel_status(struct intel_dp *intel_dp,
1592 u32 mask,
1593 u32 value)
1594 {
1595 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1597 u32 pp_stat_reg, pp_ctrl_reg;
1598
1599 lockdep_assert_held(&dev_priv->pps_mutex);
1600
1601 pp_stat_reg = _pp_stat_reg(intel_dp);
1602 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1603
1604 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1605 mask, value,
1606 I915_READ(pp_stat_reg),
1607 I915_READ(pp_ctrl_reg));
1608
1609 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1610 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1611 I915_READ(pp_stat_reg),
1612 I915_READ(pp_ctrl_reg));
1613 }
1614
1615 DRM_DEBUG_KMS("Wait complete\n");
1616 }
1617
1618 static void wait_panel_on(struct intel_dp *intel_dp)
1619 {
1620 DRM_DEBUG_KMS("Wait for panel power on\n");
1621 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1622 }
1623
1624 static void wait_panel_off(struct intel_dp *intel_dp)
1625 {
1626 DRM_DEBUG_KMS("Wait for panel power off time\n");
1627 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1628 }
1629
1630 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1631 {
1632 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1633
1634 /* When we disable the VDD override bit last we have to do the manual
1635 * wait. */
1636 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1637 intel_dp->panel_power_cycle_delay);
1638
1639 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1640 }
1641
1642 static void wait_backlight_on(struct intel_dp *intel_dp)
1643 {
1644 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1645 intel_dp->backlight_on_delay);
1646 }
1647
1648 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1649 {
1650 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1651 intel_dp->backlight_off_delay);
1652 }
1653
1654 /* Read the current pp_control value, unlocking the register if it
1655 * is locked
1656 */
1657
1658 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1659 {
1660 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1662 u32 control;
1663
1664 lockdep_assert_held(&dev_priv->pps_mutex);
1665
1666 control = I915_READ(_pp_ctrl_reg(intel_dp));
1667 control &= ~PANEL_UNLOCK_MASK;
1668 control |= PANEL_UNLOCK_REGS;
1669 return control;
1670 }
1671
1672 /*
1673 * Must be paired with edp_panel_vdd_off().
1674 * Must hold pps_mutex around the whole on/off sequence.
1675 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676 */
1677 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1678 {
1679 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1680 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1681 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1682 struct drm_i915_private *dev_priv = dev->dev_private;
1683 enum intel_display_power_domain power_domain;
1684 u32 pp;
1685 u32 pp_stat_reg, pp_ctrl_reg;
1686 bool need_to_disable = !intel_dp->want_panel_vdd;
1687
1688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
1690 if (!is_edp(intel_dp))
1691 return false;
1692
1693 cancel_delayed_work(&intel_dp->panel_vdd_work);
1694 intel_dp->want_panel_vdd = true;
1695
1696 if (edp_have_panel_vdd(intel_dp))
1697 return need_to_disable;
1698
1699 power_domain = intel_display_port_power_domain(intel_encoder);
1700 intel_display_power_get(dev_priv, power_domain);
1701
1702 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1703 port_name(intel_dig_port->port));
1704
1705 if (!edp_have_panel_power(intel_dp))
1706 wait_panel_power_cycle(intel_dp);
1707
1708 pp = ironlake_get_pp_control(intel_dp);
1709 pp |= EDP_FORCE_VDD;
1710
1711 pp_stat_reg = _pp_stat_reg(intel_dp);
1712 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1713
1714 I915_WRITE(pp_ctrl_reg, pp);
1715 POSTING_READ(pp_ctrl_reg);
1716 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1717 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1718 /*
1719 * If the panel wasn't on, delay before accessing aux channel
1720 */
1721 if (!edp_have_panel_power(intel_dp)) {
1722 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1723 port_name(intel_dig_port->port));
1724 msleep(intel_dp->panel_power_up_delay);
1725 }
1726
1727 return need_to_disable;
1728 }
1729
1730 /*
1731 * Must be paired with intel_edp_panel_vdd_off() or
1732 * intel_edp_panel_off().
1733 * Nested calls to these functions are not allowed since
1734 * we drop the lock. Caller must use some higher level
1735 * locking to prevent nested calls from other threads.
1736 */
1737 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1738 {
1739 bool vdd;
1740
1741 if (!is_edp(intel_dp))
1742 return;
1743
1744 pps_lock(intel_dp);
1745 vdd = edp_panel_vdd_on(intel_dp);
1746 pps_unlock(intel_dp);
1747
1748 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1749 port_name(dp_to_dig_port(intel_dp)->port));
1750 }
1751
1752 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1753 {
1754 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1755 struct drm_i915_private *dev_priv = dev->dev_private;
1756 struct intel_digital_port *intel_dig_port =
1757 dp_to_dig_port(intel_dp);
1758 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1759 enum intel_display_power_domain power_domain;
1760 u32 pp;
1761 u32 pp_stat_reg, pp_ctrl_reg;
1762
1763 lockdep_assert_held(&dev_priv->pps_mutex);
1764
1765 WARN_ON(intel_dp->want_panel_vdd);
1766
1767 if (!edp_have_panel_vdd(intel_dp))
1768 return;
1769
1770 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1771 port_name(intel_dig_port->port));
1772
1773 pp = ironlake_get_pp_control(intel_dp);
1774 pp &= ~EDP_FORCE_VDD;
1775
1776 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1777 pp_stat_reg = _pp_stat_reg(intel_dp);
1778
1779 I915_WRITE(pp_ctrl_reg, pp);
1780 POSTING_READ(pp_ctrl_reg);
1781
1782 /* Make sure sequencer is idle before allowing subsequent activity */
1783 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1784 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1785
1786 if ((pp & POWER_TARGET_ON) == 0)
1787 intel_dp->last_power_cycle = jiffies;
1788
1789 power_domain = intel_display_port_power_domain(intel_encoder);
1790 intel_display_power_put(dev_priv, power_domain);
1791 }
1792
1793 static void edp_panel_vdd_work(struct work_struct *__work)
1794 {
1795 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1796 struct intel_dp, panel_vdd_work);
1797
1798 pps_lock(intel_dp);
1799 if (!intel_dp->want_panel_vdd)
1800 edp_panel_vdd_off_sync(intel_dp);
1801 pps_unlock(intel_dp);
1802 }
1803
1804 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805 {
1806 unsigned long delay;
1807
1808 /*
1809 * Queue the timer to fire a long time from now (relative to the power
1810 * down delay) to keep the panel power up across a sequence of
1811 * operations.
1812 */
1813 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1814 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1815 }
1816
1817 /*
1818 * Must be paired with edp_panel_vdd_on().
1819 * Must hold pps_mutex around the whole on/off sequence.
1820 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821 */
1822 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1823 {
1824 struct drm_i915_private *dev_priv =
1825 intel_dp_to_dev(intel_dp)->dev_private;
1826
1827 lockdep_assert_held(&dev_priv->pps_mutex);
1828
1829 if (!is_edp(intel_dp))
1830 return;
1831
1832 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1833 port_name(dp_to_dig_port(intel_dp)->port));
1834
1835 intel_dp->want_panel_vdd = false;
1836
1837 if (sync)
1838 edp_panel_vdd_off_sync(intel_dp);
1839 else
1840 edp_panel_vdd_schedule_off(intel_dp);
1841 }
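/*
 * Example (illustrative): a caller already holding pps_mutex, such as
 * intel_enable_dp() below, uses the unlocked helpers directly:
 *
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);	(sync: VDD dropped right away)
 *
 * Passing sync == false defers the actual turn-off to panel_vdd_work.
 */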
1842
1843 static void edp_panel_on(struct intel_dp *intel_dp)
1844 {
1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1846 struct drm_i915_private *dev_priv = dev->dev_private;
1847 u32 pp;
1848 u32 pp_ctrl_reg;
1849
1850 lockdep_assert_held(&dev_priv->pps_mutex);
1851
1852 if (!is_edp(intel_dp))
1853 return;
1854
1855 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1856 port_name(dp_to_dig_port(intel_dp)->port));
1857
1858 if (WARN(edp_have_panel_power(intel_dp),
1859 "eDP port %c panel power already on\n",
1860 port_name(dp_to_dig_port(intel_dp)->port)))
1861 return;
1862
1863 wait_panel_power_cycle(intel_dp);
1864
1865 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1866 pp = ironlake_get_pp_control(intel_dp);
1867 if (IS_GEN5(dev)) {
1868 /* ILK workaround: disable reset around power sequence */
1869 pp &= ~PANEL_POWER_RESET;
1870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
1872 }
1873
1874 pp |= POWER_TARGET_ON;
1875 if (!IS_GEN5(dev))
1876 pp |= PANEL_POWER_RESET;
1877
1878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
1880
1881 wait_panel_on(intel_dp);
1882 intel_dp->last_power_on = jiffies;
1883
1884 if (IS_GEN5(dev)) {
1885 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1886 I915_WRITE(pp_ctrl_reg, pp);
1887 POSTING_READ(pp_ctrl_reg);
1888 }
1889 }
1890
1891 void intel_edp_panel_on(struct intel_dp *intel_dp)
1892 {
1893 if (!is_edp(intel_dp))
1894 return;
1895
1896 pps_lock(intel_dp);
1897 edp_panel_on(intel_dp);
1898 pps_unlock(intel_dp);
1899 }
1900
1901
1902 static void edp_panel_off(struct intel_dp *intel_dp)
1903 {
1904 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1905 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1906 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1907 struct drm_i915_private *dev_priv = dev->dev_private;
1908 enum intel_display_power_domain power_domain;
1909 u32 pp;
1910 u32 pp_ctrl_reg;
1911
1912 lockdep_assert_held(&dev_priv->pps_mutex);
1913
1914 if (!is_edp(intel_dp))
1915 return;
1916
1917 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1918 port_name(dp_to_dig_port(intel_dp)->port));
1919
1920 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1921 port_name(dp_to_dig_port(intel_dp)->port));
1922
1923 pp = ironlake_get_pp_control(intel_dp);
1924 /* We need to switch off panel power _and_ force vdd, for otherwise some
1925 * panels get very unhappy and cease to work. */
1926 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1927 EDP_BLC_ENABLE);
1928
1929 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1930
1931 intel_dp->want_panel_vdd = false;
1932
1933 I915_WRITE(pp_ctrl_reg, pp);
1934 POSTING_READ(pp_ctrl_reg);
1935
1936 intel_dp->last_power_cycle = jiffies;
1937 wait_panel_off(intel_dp);
1938
1939 /* We got a reference when we enabled the VDD. */
1940 power_domain = intel_display_port_power_domain(intel_encoder);
1941 intel_display_power_put(dev_priv, power_domain);
1942 }
1943
1944 void intel_edp_panel_off(struct intel_dp *intel_dp)
1945 {
1946 if (!is_edp(intel_dp))
1947 return;
1948
1949 pps_lock(intel_dp);
1950 edp_panel_off(intel_dp);
1951 pps_unlock(intel_dp);
1952 }
1953
1954 /* Enable backlight in the panel power control. */
1955 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1956 {
1957 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1958 struct drm_device *dev = intel_dig_port->base.base.dev;
1959 struct drm_i915_private *dev_priv = dev->dev_private;
1960 u32 pp;
1961 u32 pp_ctrl_reg;
1962
1963 /*
1964 * If we enable the backlight right away following a panel power
1965 * on, we may see slight flicker as the panel syncs with the eDP
1966 * link. So delay a bit to make sure the image is solid before
1967 * allowing it to appear.
1968 */
1969 wait_backlight_on(intel_dp);
1970
1971 pps_lock(intel_dp);
1972
1973 pp = ironlake_get_pp_control(intel_dp);
1974 pp |= EDP_BLC_ENABLE;
1975
1976 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1977
1978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
1980
1981 pps_unlock(intel_dp);
1982 }
1983
1984 /* Enable backlight PWM and backlight PP control. */
1985 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1986 {
1987 if (!is_edp(intel_dp))
1988 return;
1989
1990 DRM_DEBUG_KMS("\n");
1991
1992 intel_panel_enable_backlight(intel_dp->attached_connector);
1993 _intel_edp_backlight_on(intel_dp);
1994 }
1995
1996 /* Disable backlight in the panel power control. */
1997 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1998 {
1999 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2000 struct drm_i915_private *dev_priv = dev->dev_private;
2001 u32 pp;
2002 u32 pp_ctrl_reg;
2003
2004 if (!is_edp(intel_dp))
2005 return;
2006
2007 pps_lock(intel_dp);
2008
2009 pp = ironlake_get_pp_control(intel_dp);
2010 pp &= ~EDP_BLC_ENABLE;
2011
2012 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2013
2014 I915_WRITE(pp_ctrl_reg, pp);
2015 POSTING_READ(pp_ctrl_reg);
2016
2017 pps_unlock(intel_dp);
2018
2019 intel_dp->last_backlight_off = jiffies;
2020 edp_wait_backlight_off(intel_dp);
2021 }
2022
2023 /* Disable backlight PP control and backlight PWM. */
2024 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2025 {
2026 if (!is_edp(intel_dp))
2027 return;
2028
2029 DRM_DEBUG_KMS("\n");
2030
2031 _intel_edp_backlight_off(intel_dp);
2032 intel_panel_disable_backlight(intel_dp->attached_connector);
2033 }
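/*
 * Note the mirrored ordering (illustrative summary of the two functions
 * above): intel_edp_backlight_on() enables the PWM before setting the
 * panel power control BLC bit, while intel_edp_backlight_off() clears
 * the BLC bit before disabling the PWM, so the BLC bit is only ever set
 * while the PWM side is active.
 */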
2034
2035 /*
2036 * Hook for controlling the panel power control backlight through the bl_power
2037 * sysfs attribute. Take care to handle multiple calls.
2038 */
2039 static void intel_edp_backlight_power(struct intel_connector *connector,
2040 bool enable)
2041 {
2042 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2043 bool is_enabled;
2044
2045 pps_lock(intel_dp);
2046 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2047 pps_unlock(intel_dp);
2048
2049 if (is_enabled == enable)
2050 return;
2051
2052 DRM_DEBUG_KMS("panel power control backlight %s\n",
2053 enable ? "enable" : "disable");
2054
2055 if (enable)
2056 _intel_edp_backlight_on(intel_dp);
2057 else
2058 _intel_edp_backlight_off(intel_dp);
2059 }
2060
2061 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2062 {
2063 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2064 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2065 struct drm_device *dev = crtc->dev;
2066 struct drm_i915_private *dev_priv = dev->dev_private;
2067 u32 dpa_ctl;
2068
2069 assert_pipe_disabled(dev_priv,
2070 to_intel_crtc(crtc)->pipe);
2071
2072 DRM_DEBUG_KMS("\n");
2073 dpa_ctl = I915_READ(DP_A);
2074 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2075 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2076
2077 /* We don't adjust intel_dp->DP while tearing down the link, to
2078 * facilitate link retraining (e.g. after hotplug). Hence clear all
2079 * enable bits here to ensure that we don't enable too much. */
2080 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2081 intel_dp->DP |= DP_PLL_ENABLE;
2082 I915_WRITE(DP_A, intel_dp->DP);
2083 POSTING_READ(DP_A);
2084 udelay(200);
2085 }
2086
2087 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2088 {
2089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2090 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2091 struct drm_device *dev = crtc->dev;
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 dpa_ctl;
2094
2095 assert_pipe_disabled(dev_priv,
2096 to_intel_crtc(crtc)->pipe);
2097
2098 dpa_ctl = I915_READ(DP_A);
2099 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2100 "dp pll off, should be on\n");
2101 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102
2103 /* We can't rely on the value tracked for the DP register in
2104 * intel_dp->DP because link_down must not change that (otherwise link
2105 * re-training will fail). */
2106 dpa_ctl &= ~DP_PLL_ENABLE;
2107 I915_WRITE(DP_A, dpa_ctl);
2108 POSTING_READ(DP_A);
2109 udelay(200);
2110 }
2111
2112 /* If the sink supports it, try to set the power state appropriately */
2113 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2114 {
2115 int ret, i;
2116
2117 /* Should have a valid DPCD by this point */
2118 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2119 return;
2120
2121 if (mode != DRM_MODE_DPMS_ON) {
2122 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2123 DP_SET_POWER_D3);
2124 } else {
2125 /*
2126 * When turning on, we need to retry for 1ms to give the sink
2127 * time to wake up.
2128 */
2129 for (i = 0; i < 3; i++) {
2130 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2131 DP_SET_POWER_D0);
2132 if (ret == 1)
2133 break;
2134 msleep(1);
2135 }
2136 }
2137
2138 if (ret != 1)
2139 DRM_DEBUG_KMS("failed to %s sink power state\n",
2140 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2141 }
2142
2143 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2144 enum pipe *pipe)
2145 {
2146 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2147 enum port port = dp_to_dig_port(intel_dp)->port;
2148 struct drm_device *dev = encoder->base.dev;
2149 struct drm_i915_private *dev_priv = dev->dev_private;
2150 enum intel_display_power_domain power_domain;
2151 u32 tmp;
2152
2153 power_domain = intel_display_port_power_domain(encoder);
2154 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2155 return false;
2156
2157 tmp = I915_READ(intel_dp->output_reg);
2158
2159 if (!(tmp & DP_PORT_EN))
2160 return false;
2161
2162 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2163 *pipe = PORT_TO_PIPE_CPT(tmp);
2164 } else if (IS_CHERRYVIEW(dev)) {
2165 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2166 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2167 *pipe = PORT_TO_PIPE(tmp);
2168 } else {
2169 u32 trans_sel;
2170 u32 trans_dp;
2171 int i;
2172
2173 switch (intel_dp->output_reg) {
2174 case PCH_DP_B:
2175 trans_sel = TRANS_DP_PORT_SEL_B;
2176 break;
2177 case PCH_DP_C:
2178 trans_sel = TRANS_DP_PORT_SEL_C;
2179 break;
2180 case PCH_DP_D:
2181 trans_sel = TRANS_DP_PORT_SEL_D;
2182 break;
2183 default:
2184 return true;
2185 }
2186
2187 for_each_pipe(dev_priv, i) {
2188 trans_dp = I915_READ(TRANS_DP_CTL(i));
2189 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2190 *pipe = i;
2191 return true;
2192 }
2193 }
2194
2195 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2196 intel_dp->output_reg);
2197 }
2198
2199 return true;
2200 }
2201
2202 static void intel_dp_get_config(struct intel_encoder *encoder,
2203 struct intel_crtc_state *pipe_config)
2204 {
2205 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2206 u32 tmp, flags = 0;
2207 struct drm_device *dev = encoder->base.dev;
2208 struct drm_i915_private *dev_priv = dev->dev_private;
2209 enum port port = dp_to_dig_port(intel_dp)->port;
2210 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2211 int dotclock;
2212
2213 tmp = I915_READ(intel_dp->output_reg);
2214 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2215 pipe_config->has_audio = true;
2216
2217 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2218 if (tmp & DP_SYNC_HS_HIGH)
2219 flags |= DRM_MODE_FLAG_PHSYNC;
2220 else
2221 flags |= DRM_MODE_FLAG_NHSYNC;
2222
2223 if (tmp & DP_SYNC_VS_HIGH)
2224 flags |= DRM_MODE_FLAG_PVSYNC;
2225 else
2226 flags |= DRM_MODE_FLAG_NVSYNC;
2227 } else {
2228 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2229 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2230 flags |= DRM_MODE_FLAG_PHSYNC;
2231 else
2232 flags |= DRM_MODE_FLAG_NHSYNC;
2233
2234 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2235 flags |= DRM_MODE_FLAG_PVSYNC;
2236 else
2237 flags |= DRM_MODE_FLAG_NVSYNC;
2238 }
2239
2240 pipe_config->base.adjusted_mode.flags |= flags;
2241
2242 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2243 tmp & DP_COLOR_RANGE_16_235)
2244 pipe_config->limited_color_range = true;
2245
2246 pipe_config->has_dp_encoder = true;
2247
2248 intel_dp_get_m_n(crtc, pipe_config);
2249
2250 if (port == PORT_A) {
2251 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2252 pipe_config->port_clock = 162000;
2253 else
2254 pipe_config->port_clock = 270000;
2255 }
2256
2257 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2258 &pipe_config->dp_m_n);
2259
2260 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2261 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2262
2263 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2264
2265 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2266 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2267 /*
2268 * This is a big fat ugly hack.
2269 *
2270 * Some machines in UEFI boot mode provide us a VBT that has 18
2271 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2272 * unknown we fail to light up. Yet the same BIOS boots up with
2273 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2274 * max, not what it tells us to use.
2275 *
2276 * Note: This will still be broken if the eDP panel is not lit
2277 * up by the BIOS, and thus we can't get the mode at module
2278 * load.
2279 */
2280 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2281 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2282 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2283 }
2284 }
2285
2286 static void intel_disable_dp(struct intel_encoder *encoder)
2287 {
2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2289 struct drm_device *dev = encoder->base.dev;
2290 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2291
2292 if (crtc->config->has_audio)
2293 intel_audio_codec_disable(encoder);
2294
2295 if (HAS_PSR(dev) && !HAS_DDI(dev))
2296 intel_psr_disable(intel_dp);
2297
2298 /* Make sure the panel is off before trying to change the mode. But also
2299 * ensure that we have vdd while we switch off the panel. */
2300 intel_edp_panel_vdd_on(intel_dp);
2301 intel_edp_backlight_off(intel_dp);
2302 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2303 intel_edp_panel_off(intel_dp);
2304
2305 /* disable the port before the pipe on g4x */
2306 if (INTEL_INFO(dev)->gen < 5)
2307 intel_dp_link_down(intel_dp);
2308 }
2309
2310 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2311 {
2312 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2313 enum port port = dp_to_dig_port(intel_dp)->port;
2314
2315 intel_dp_link_down(intel_dp);
2316 if (port == PORT_A)
2317 ironlake_edp_pll_off(intel_dp);
2318 }
2319
2320 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2321 {
2322 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2323
2324 intel_dp_link_down(intel_dp);
2325 }
2326
2327 static void chv_post_disable_dp(struct intel_encoder *encoder)
2328 {
2329 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2330 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2331 struct drm_device *dev = encoder->base.dev;
2332 struct drm_i915_private *dev_priv = dev->dev_private;
2333 struct intel_crtc *intel_crtc =
2334 to_intel_crtc(encoder->base.crtc);
2335 enum dpio_channel ch = vlv_dport_to_channel(dport);
2336 enum pipe pipe = intel_crtc->pipe;
2337 u32 val;
2338
2339 intel_dp_link_down(intel_dp);
2340
2341 mutex_lock(&dev_priv->dpio_lock);
2342
2343 /* Propagate soft reset to data lane reset */
2344 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2345 val |= CHV_PCS_REQ_SOFTRESET_EN;
2346 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2347
2348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2349 val |= CHV_PCS_REQ_SOFTRESET_EN;
2350 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2351
2352 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2353 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2354 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2355
2356 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2357 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2358 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2359
2360 mutex_unlock(&dev_priv->dpio_lock);
2361 }
2362
2363 static void
2364 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2365 uint32_t *DP,
2366 uint8_t dp_train_pat)
2367 {
2368 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2369 struct drm_device *dev = intel_dig_port->base.base.dev;
2370 struct drm_i915_private *dev_priv = dev->dev_private;
2371 enum port port = intel_dig_port->port;
2372
2373 if (HAS_DDI(dev)) {
2374 uint32_t temp = I915_READ(DP_TP_CTL(port));
2375
2376 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2377 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2378 else
2379 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2380
2381 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2382 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2383 case DP_TRAINING_PATTERN_DISABLE:
2384 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2385
2386 break;
2387 case DP_TRAINING_PATTERN_1:
2388 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2389 break;
2390 case DP_TRAINING_PATTERN_2:
2391 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2392 break;
2393 case DP_TRAINING_PATTERN_3:
2394 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2395 break;
2396 }
2397 I915_WRITE(DP_TP_CTL(port), temp);
2398
2399 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2400 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2401
2402 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2403 case DP_TRAINING_PATTERN_DISABLE:
2404 *DP |= DP_LINK_TRAIN_OFF_CPT;
2405 break;
2406 case DP_TRAINING_PATTERN_1:
2407 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2408 break;
2409 case DP_TRAINING_PATTERN_2:
2410 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2411 break;
2412 case DP_TRAINING_PATTERN_3:
2413 DRM_ERROR("DP training pattern 3 not supported\n");
2414 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2415 break;
2416 }
2417
2418 } else {
2419 if (IS_CHERRYVIEW(dev))
2420 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2421 else
2422 *DP &= ~DP_LINK_TRAIN_MASK;
2423
2424 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2425 case DP_TRAINING_PATTERN_DISABLE:
2426 *DP |= DP_LINK_TRAIN_OFF;
2427 break;
2428 case DP_TRAINING_PATTERN_1:
2429 *DP |= DP_LINK_TRAIN_PAT_1;
2430 break;
2431 case DP_TRAINING_PATTERN_2:
2432 *DP |= DP_LINK_TRAIN_PAT_2;
2433 break;
2434 case DP_TRAINING_PATTERN_3:
2435 if (IS_CHERRYVIEW(dev)) {
2436 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2437 } else {
2438 DRM_ERROR("DP training pattern 3 not supported\n");
2439 *DP |= DP_LINK_TRAIN_PAT_2;
2440 }
2441 break;
2442 }
2443 }
2444 }
2445
2446 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2447 {
2448 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2449 struct drm_i915_private *dev_priv = dev->dev_private;
2450
2451 /* enable with pattern 1 (as per spec) */
2452 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2453 DP_TRAINING_PATTERN_1);
2454
2455 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2456 POSTING_READ(intel_dp->output_reg);
2457
2458 /*
2459 * Magic for VLV/CHV. We _must_ first set up the register
2460 * without actually enabling the port, and then do another
2461 * write to enable the port. Otherwise link training will
2462 * fail when the power sequencer is freshly used for this port.
2463 */
2464 intel_dp->DP |= DP_PORT_EN;
2465
2466 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2467 POSTING_READ(intel_dp->output_reg);
2468 }
2469
2470 static void intel_enable_dp(struct intel_encoder *encoder)
2471 {
2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473 struct drm_device *dev = encoder->base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
2475 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2476 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2477
2478 if (WARN_ON(dp_reg & DP_PORT_EN))
2479 return;
2480
2481 pps_lock(intel_dp);
2482
2483 if (IS_VALLEYVIEW(dev))
2484 vlv_init_panel_power_sequencer(intel_dp);
2485
2486 intel_dp_enable_port(intel_dp);
2487
2488 edp_panel_vdd_on(intel_dp);
2489 edp_panel_on(intel_dp);
2490 edp_panel_vdd_off(intel_dp, true);
2491
2492 pps_unlock(intel_dp);
2493
2494 if (IS_VALLEYVIEW(dev))
2495 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2496
2497 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2498 intel_dp_start_link_train(intel_dp);
2499 intel_dp_complete_link_train(intel_dp);
2500 intel_dp_stop_link_train(intel_dp);
2501
2502 if (crtc->config->has_audio) {
2503 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2504 pipe_name(crtc->pipe));
2505 intel_audio_codec_enable(encoder);
2506 }
2507 }
2508
2509 static void g4x_enable_dp(struct intel_encoder *encoder)
2510 {
2511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512
2513 intel_enable_dp(encoder);
2514 intel_edp_backlight_on(intel_dp);
2515 }
2516
2517 static void vlv_enable_dp(struct intel_encoder *encoder)
2518 {
2519 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520
2521 intel_edp_backlight_on(intel_dp);
2522 intel_psr_enable(intel_dp);
2523 }
2524
2525 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2526 {
2527 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2528 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2529
2530 intel_dp_prepare(encoder);
2531
2532 /* Only ilk+ has port A */
2533 if (dport->port == PORT_A) {
2534 ironlake_set_pll_cpu_edp(intel_dp);
2535 ironlake_edp_pll_on(intel_dp);
2536 }
2537 }
2538
2539 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2540 {
2541 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2542 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2543 enum pipe pipe = intel_dp->pps_pipe;
2544 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2545
2546 edp_panel_vdd_off_sync(intel_dp);
2547
2548 /*
2549 * VLV seems to get confused when multiple power sequencers
2550 * have the same port selected (even if only one has power/vdd
2551 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2552 * CHV, on the other hand, doesn't seem to mind having the same port
2553 * selected in multiple power sequencers, but let's clear the
2554 * port select always when logically disconnecting a power sequencer
2555 * from a port.
2556 */
2557 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2558 pipe_name(pipe), port_name(intel_dig_port->port));
2559 I915_WRITE(pp_on_reg, 0);
2560 POSTING_READ(pp_on_reg);
2561
2562 intel_dp->pps_pipe = INVALID_PIPE;
2563 }
2564
2565 static void vlv_steal_power_sequencer(struct drm_device *dev,
2566 enum pipe pipe)
2567 {
2568 struct drm_i915_private *dev_priv = dev->dev_private;
2569 struct intel_encoder *encoder;
2570
2571 lockdep_assert_held(&dev_priv->pps_mutex);
2572
2573 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2574 return;
2575
2576 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2577 base.head) {
2578 struct intel_dp *intel_dp;
2579 enum port port;
2580
2581 if (encoder->type != INTEL_OUTPUT_EDP)
2582 continue;
2583
2584 intel_dp = enc_to_intel_dp(&encoder->base);
2585 port = dp_to_dig_port(intel_dp)->port;
2586
2587 if (intel_dp->pps_pipe != pipe)
2588 continue;
2589
2590 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2591 pipe_name(pipe), port_name(port));
2592
2593 WARN(encoder->connectors_active,
2594 "stealing pipe %c power sequencer from active eDP port %c\n",
2595 pipe_name(pipe), port_name(port));
2596
2597 /* make sure vdd is off before we steal it */
2598 vlv_detach_power_sequencer(intel_dp);
2599 }
2600 }
2601
2602 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2603 {
2604 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2605 struct intel_encoder *encoder = &intel_dig_port->base;
2606 struct drm_device *dev = encoder->base.dev;
2607 struct drm_i915_private *dev_priv = dev->dev_private;
2608 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2609
2610 lockdep_assert_held(&dev_priv->pps_mutex);
2611
2612 if (!is_edp(intel_dp))
2613 return;
2614
2615 if (intel_dp->pps_pipe == crtc->pipe)
2616 return;
2617
2618 /*
2619 * If another power sequencer was being used on this
2620 * port previously make sure to turn off vdd there while
2621 * we still have control of it.
2622 */
2623 if (intel_dp->pps_pipe != INVALID_PIPE)
2624 vlv_detach_power_sequencer(intel_dp);
2625
2626 /*
2627 * We may be stealing the power
2628 * sequencer from another port.
2629 */
2630 vlv_steal_power_sequencer(dev, crtc->pipe);
2631
2632 /* now it's all ours */
2633 intel_dp->pps_pipe = crtc->pipe;
2634
2635 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2636 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2637
2638 /* init power sequencer on this pipe and port */
2639 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2640 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2641 }
2642
2643 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2644 {
2645 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2646 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2647 struct drm_device *dev = encoder->base.dev;
2648 struct drm_i915_private *dev_priv = dev->dev_private;
2649 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2650 enum dpio_channel port = vlv_dport_to_channel(dport);
2651 int pipe = intel_crtc->pipe;
2652 u32 val;
2653
2654 mutex_lock(&dev_priv->dpio_lock);
2655
2656 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2657 val = 0; /* value just read is discarded; the register is rebuilt below */
2658 if (pipe)
2659 val |= (1<<21);
2660 else
2661 val &= ~(1<<21);
2662 val |= 0x001000c4;
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2664 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2665 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2666
2667 mutex_unlock(&dev_priv->dpio_lock);
2668
2669 intel_enable_dp(encoder);
2670 }
2671
2672 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2673 {
2674 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2675 struct drm_device *dev = encoder->base.dev;
2676 struct drm_i915_private *dev_priv = dev->dev_private;
2677 struct intel_crtc *intel_crtc =
2678 to_intel_crtc(encoder->base.crtc);
2679 enum dpio_channel port = vlv_dport_to_channel(dport);
2680 int pipe = intel_crtc->pipe;
2681
2682 intel_dp_prepare(encoder);
2683
2684 /* Program Tx lane resets to default */
2685 mutex_lock(&dev_priv->dpio_lock);
2686 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2687 DPIO_PCS_TX_LANE2_RESET |
2688 DPIO_PCS_TX_LANE1_RESET);
2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2690 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2691 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2692 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2693 DPIO_PCS_CLK_SOFT_RESET);
2694
2695 /* Fix up inter-pair skew failure */
2696 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2699 mutex_unlock(&dev_priv->dpio_lock);
2700 }
2701
2702 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2703 {
2704 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2705 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2706 struct drm_device *dev = encoder->base.dev;
2707 struct drm_i915_private *dev_priv = dev->dev_private;
2708 struct intel_crtc *intel_crtc =
2709 to_intel_crtc(encoder->base.crtc);
2710 enum dpio_channel ch = vlv_dport_to_channel(dport);
2711 int pipe = intel_crtc->pipe;
2712 int data, i;
2713 u32 val;
2714
2715 mutex_lock(&dev_priv->dpio_lock);
2716
2717 /* allow hardware to manage TX FIFO reset source */
2718 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2719 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2720 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2721
2722 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2723 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2724 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2725
2726 /* Deassert soft data lane reset */
2727 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2728 val |= CHV_PCS_REQ_SOFTRESET_EN;
2729 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2730
2731 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2732 val |= CHV_PCS_REQ_SOFTRESET_EN;
2733 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2734
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2736 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2737 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2738
2739 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2740 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2742
2743 /* Program Tx lane latency optimal setting */
2744 for (i = 0; i < 4; i++) {
2745 /* Set the latency optimal bit */
2746 data = (i == 1) ? 0x0 : 0x6;
2747 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2748 data << DPIO_FRC_LATENCY_SHFIT);
2749
2750 /* Set the upar bit */
2751 data = (i == 1) ? 0x0 : 0x1;
2752 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2753 data << DPIO_UPAR_SHIFT);
2754 }
2755
2756 /* Data lane stagger programming */
2757 /* FIXME: Fix up value only after power analysis */
2758
2759 mutex_unlock(&dev_priv->dpio_lock);
2760
2761 intel_enable_dp(encoder);
2762 }
2763
2764 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2765 {
2766 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2767 struct drm_device *dev = encoder->base.dev;
2768 struct drm_i915_private *dev_priv = dev->dev_private;
2769 struct intel_crtc *intel_crtc =
2770 to_intel_crtc(encoder->base.crtc);
2771 enum dpio_channel ch = vlv_dport_to_channel(dport);
2772 enum pipe pipe = intel_crtc->pipe;
2773 u32 val;
2774
2775 intel_dp_prepare(encoder);
2776
2777 mutex_lock(&dev_priv->dpio_lock);
2778
2779 /* program left/right clock distribution */
2780 if (pipe != PIPE_B) {
2781 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2782 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2783 if (ch == DPIO_CH0)
2784 val |= CHV_BUFLEFTENA1_FORCE;
2785 if (ch == DPIO_CH1)
2786 val |= CHV_BUFRIGHTENA1_FORCE;
2787 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2788 } else {
2789 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2790 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2791 if (ch == DPIO_CH0)
2792 val |= CHV_BUFLEFTENA2_FORCE;
2793 if (ch == DPIO_CH1)
2794 val |= CHV_BUFRIGHTENA2_FORCE;
2795 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2796 }
2797
2798 /* program clock channel usage */
2799 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2800 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2801 if (pipe != PIPE_B)
2802 val &= ~CHV_PCS_USEDCLKCHANNEL;
2803 else
2804 val |= CHV_PCS_USEDCLKCHANNEL;
2805 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2806
2807 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2808 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2809 if (pipe != PIPE_B)
2810 val &= ~CHV_PCS_USEDCLKCHANNEL;
2811 else
2812 val |= CHV_PCS_USEDCLKCHANNEL;
2813 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2814
2815 /*
2816 * This is a bit weird since generally CL
2817 * matches the pipe, but here we need to
2818 * pick the CL based on the port.
2819 */
2820 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2821 if (pipe != PIPE_B)
2822 val &= ~CHV_CMN_USEDCLKCHANNEL;
2823 else
2824 val |= CHV_CMN_USEDCLKCHANNEL;
2825 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2826
2827 mutex_unlock(&dev_priv->dpio_lock);
2828 }
2829
2830 /*
2831 * Native read with retry for link status and receiver capability reads for
2832 * cases where the sink may still be asleep.
2833 *
2834 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2835 * supposed to retry 3 times per the spec.
2836 */
2837 static ssize_t
2838 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2839 void *buffer, size_t size)
2840 {
2841 ssize_t ret;
2842 int i;
2843
2844 /*
2845 * Sometimes we just get the same incorrect byte repeated
2846 * over the entire buffer. Doing just one throw away read
2847 * initially seems to "solve" it.
2848 */
2849 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2850
2851 for (i = 0; i < 3; i++) {
2852 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2853 if (ret == size)
2854 return ret;
2855 msleep(1);
2856 }
2857
2858 return ret;
2859 }
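/*
 * Example (illustrative sketch): reading the sink's DPCD revision via
 * the wake-retry helper above.
 *
 *	uint8_t rev;
 *
 *	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV,
 *				    &rev, 1) == 1)
 *		DRM_DEBUG_KMS("DPCD rev 0x%02x\n", rev);
 */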
2860
2861 /*
2862 * Fetch AUX CH registers 0x202 - 0x207 which contain
2863 * link status information
2864 */
2865 static bool
2866 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2867 {
2868 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2869 DP_LANE0_1_STATUS,
2870 link_status,
2871 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2872 }
2873
2874 /* These are source-specific values. */
2875 static uint8_t
2876 intel_dp_voltage_max(struct intel_dp *intel_dp)
2877 {
2878 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2879 struct drm_i915_private *dev_priv = dev->dev_private;
2880 enum port port = dp_to_dig_port(intel_dp)->port;
2881
2882 if (INTEL_INFO(dev)->gen >= 9) {
2883 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2884 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2885 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2886 } else if (IS_VALLEYVIEW(dev))
2887 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2888 else if (IS_GEN7(dev) && port == PORT_A)
2889 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2890 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2891 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2892 else
2893 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2894 }
2895
2896 static uint8_t
2897 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2898 {
2899 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2900 enum port port = dp_to_dig_port(intel_dp)->port;
2901
2902 if (INTEL_INFO(dev)->gen >= 9) {
2903 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2904 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2905 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2906 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2907 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2908 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2909 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2910 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2911 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2912 default:
2913 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2914 }
2915 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2916 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2918 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2919 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2920 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2922 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2924 default:
2925 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2926 }
2927 } else if (IS_VALLEYVIEW(dev)) {
2928 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2930 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2932 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2934 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2936 default:
2937 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2938 }
2939 } else if (IS_GEN7(dev) && port == PORT_A) {
2940 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2942 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2943 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2944 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2945 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2946 default:
2947 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2948 }
2949 } else {
2950 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2952 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2953 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2954 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2956 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2958 default:
2959 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2960 }
2961 }
2962 }
2963
2964 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2965 {
2966 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2967 struct drm_i915_private *dev_priv = dev->dev_private;
2968 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2969 struct intel_crtc *intel_crtc =
2970 to_intel_crtc(dport->base.base.crtc);
2971 unsigned long demph_reg_value, preemph_reg_value,
2972 uniqtranscale_reg_value;
2973 uint8_t train_set = intel_dp->train_set[0];
2974 enum dpio_channel port = vlv_dport_to_channel(dport);
2975 int pipe = intel_crtc->pipe;
2976
2977 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2978 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2979 preemph_reg_value = 0x0004000;
2980 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2982 demph_reg_value = 0x2B405555;
2983 uniqtranscale_reg_value = 0x552AB83A;
2984 break;
2985 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2986 demph_reg_value = 0x2B404040;
2987 uniqtranscale_reg_value = 0x5548B83A;
2988 break;
2989 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2990 demph_reg_value = 0x2B245555;
2991 uniqtranscale_reg_value = 0x5560B83A;
2992 break;
2993 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2994 demph_reg_value = 0x2B405555;
2995 uniqtranscale_reg_value = 0x5598DA3A;
2996 break;
2997 default:
2998 return 0;
2999 }
3000 break;
3001 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3002 preemph_reg_value = 0x0002000;
3003 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3005 demph_reg_value = 0x2B404040;
3006 uniqtranscale_reg_value = 0x5552B83A;
3007 break;
3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3009 demph_reg_value = 0x2B404848;
3010 uniqtranscale_reg_value = 0x5580B83A;
3011 break;
3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3013 demph_reg_value = 0x2B404040;
3014 uniqtranscale_reg_value = 0x55ADDA3A;
3015 break;
3016 default:
3017 return 0;
3018 }
3019 break;
3020 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3021 preemph_reg_value = 0x0000000;
3022 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3024 demph_reg_value = 0x2B305555;
3025 uniqtranscale_reg_value = 0x5570B83A;
3026 break;
3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3028 demph_reg_value = 0x2B2B4040;
3029 uniqtranscale_reg_value = 0x55ADDA3A;
3030 break;
3031 default:
3032 return 0;
3033 }
3034 break;
3035 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3036 preemph_reg_value = 0x0006000;
3037 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3039 demph_reg_value = 0x1B405555;
3040 uniqtranscale_reg_value = 0x55ADDA3A;
3041 break;
3042 default:
3043 return 0;
3044 }
3045 break;
3046 default:
3047 return 0;
3048 }
3049
3050 mutex_lock(&dev_priv->dpio_lock);
3051 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3052 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3053 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3054 uniqtranscale_reg_value);
3055 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3056 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3057 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3058 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3059 mutex_unlock(&dev_priv->dpio_lock);
3060
3061 return 0;
3062 }
3063
3064 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3065 {
3066 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3067 struct drm_i915_private *dev_priv = dev->dev_private;
3068 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3069 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3070 u32 deemph_reg_value, margin_reg_value, val;
3071 uint8_t train_set = intel_dp->train_set[0];
3072 enum dpio_channel ch = vlv_dport_to_channel(dport);
3073 enum pipe pipe = intel_crtc->pipe;
3074 int i;
3075
3076 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3077 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3078 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3079 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3080 deemph_reg_value = 128;
3081 margin_reg_value = 52;
3082 break;
3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3084 deemph_reg_value = 128;
3085 margin_reg_value = 77;
3086 break;
3087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3088 deemph_reg_value = 128;
3089 margin_reg_value = 102;
3090 break;
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3092 deemph_reg_value = 128;
3093 margin_reg_value = 154;
3094 /* FIXME extra to set for 1200 */
3095 break;
3096 default:
3097 return 0;
3098 }
3099 break;
3100 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3101 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3102 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3103 deemph_reg_value = 85;
3104 margin_reg_value = 78;
3105 break;
3106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3107 deemph_reg_value = 85;
3108 margin_reg_value = 116;
3109 break;
3110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3111 deemph_reg_value = 85;
3112 margin_reg_value = 154;
3113 break;
3114 default:
3115 return 0;
3116 }
3117 break;
3118 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3119 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3120 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3121 deemph_reg_value = 64;
3122 margin_reg_value = 104;
3123 break;
3124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3125 deemph_reg_value = 64;
3126 margin_reg_value = 154;
3127 break;
3128 default:
3129 return 0;
3130 }
3131 break;
3132 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3133 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3135 deemph_reg_value = 43;
3136 margin_reg_value = 154;
3137 break;
3138 default:
3139 return 0;
3140 }
3141 break;
3142 default:
3143 return 0;
3144 }
3145
3146 mutex_lock(&dev_priv->dpio_lock);
3147
3148 /* Clear calc init */
3149 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3150 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3151 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3152 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3153 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3154
3155 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3156 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3157 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3158 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3159 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3160
3161 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3162 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3163 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3165
3166 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3167 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3168 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3169 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3170
3171 /* Program swing deemph */
3172 for (i = 0; i < 4; i++) {
3173 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3174 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3175 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3176 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3177 }
3178
3179 /* Program swing margin */
3180 for (i = 0; i < 4; i++) {
3181 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3182 val &= ~DPIO_SWING_MARGIN000_MASK;
3183 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3184 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3185 }
3186
3187 /* Disable unique transition scale */
3188 for (i = 0; i < 4; i++) {
3189 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3190 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3191 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3192 }
3193
3194 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3195 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3196 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3197 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3198
3199 /*
3200 * The document said it needs to set bit 27 for ch0 and bit 26
3201 * for ch1. Might be a typo in the doc.
3202 * For now, for this unique transition scale selection, set bit
3203 * 27 for ch0 and ch1.
3204 */
3205 for (i = 0; i < 4; i++) {
3206 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3207 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3208 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3209 }
3210
3211 for (i = 0; i < 4; i++) {
3212 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3213 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3214 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3215 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3216 }
3217 }
3218
3219 /* Start swing calculation */
3220 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3221 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3222 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3223
3224 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3225 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3226 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3227
3228 /* LRC Bypass */
3229 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3230 val |= DPIO_LRC_BYPASS;
3231 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3232
3233 mutex_unlock(&dev_priv->dpio_lock);
3234
3235 return 0;
3236 }
3237
3238 static void
3239 intel_get_adjust_train(struct intel_dp *intel_dp,
3240 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3241 {
3242 uint8_t v = 0;
3243 uint8_t p = 0;
3244 int lane;
3245 uint8_t voltage_max;
3246 uint8_t preemph_max;
3247
3248 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3249 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3250 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3251
3252 if (this_v > v)
3253 v = this_v;
3254 if (this_p > p)
3255 p = this_p;
3256 }
3257
3258 voltage_max = intel_dp_voltage_max(intel_dp);
3259 if (v >= voltage_max)
3260 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3261
3262 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3263 if (p >= preemph_max)
3264 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3265
3266 for (lane = 0; lane < 4; lane++)
3267 intel_dp->train_set[lane] = v | p;
3268 }
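/*
 * Worked example (illustrative): if a lane requests voltage swing
 * level 3 but intel_dp_voltage_max() caps this platform at level 2, we
 * transmit level 2 with DP_TRAIN_MAX_SWING_REACHED set so the sink
 * stops asking for more. Pre-emphasis is clamped the same way against
 * intel_dp_pre_emphasis_max().
 */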
3269
3270 static uint32_t
3271 intel_gen4_signal_levels(uint8_t train_set)
3272 {
3273 uint32_t signal_levels = 0;
3274
3275 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3277 default:
3278 signal_levels |= DP_VOLTAGE_0_4;
3279 break;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3281 signal_levels |= DP_VOLTAGE_0_6;
3282 break;
3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3284 signal_levels |= DP_VOLTAGE_0_8;
3285 break;
3286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3287 signal_levels |= DP_VOLTAGE_1_2;
3288 break;
3289 }
3290 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3291 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3292 default:
3293 signal_levels |= DP_PRE_EMPHASIS_0;
3294 break;
3295 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3296 signal_levels |= DP_PRE_EMPHASIS_3_5;
3297 break;
3298 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3299 signal_levels |= DP_PRE_EMPHASIS_6;
3300 break;
3301 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3302 signal_levels |= DP_PRE_EMPHASIS_9_5;
3303 break;
3304 }
3305 return signal_levels;
3306 }
3307
3308 /* Gen6's DP voltage swing and pre-emphasis control */
3309 static uint32_t
3310 intel_gen6_edp_signal_levels(uint8_t train_set)
3311 {
3312 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3313 DP_TRAIN_PRE_EMPHASIS_MASK);
3314 switch (signal_levels) {
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3317 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3322 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3323 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3325 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3327 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3328 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3329 default:
3330 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3331 "0x%x\n", signal_levels);
3332 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3333 }
3334 }
3335
3336 /* Gen7's DP voltage swing and pre-emphasis control */
3337 static uint32_t
3338 intel_gen7_edp_signal_levels(uint8_t train_set)
3339 {
3340 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3341 DP_TRAIN_PRE_EMPHASIS_MASK);
3342 switch (signal_levels) {
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3344 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3346 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3348 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3349
3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3351 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3353 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3354
3355 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3356 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3357 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3358 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3359
3360 default:
3361 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3362 "0x%x\n", signal_levels);
3363 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3364 }
3365 }
3366
3367 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3368 static uint32_t
3369 intel_hsw_signal_levels(uint8_t train_set)
3370 {
3371 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3372 DP_TRAIN_PRE_EMPHASIS_MASK);
3373 switch (signal_levels) {
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3375 return DDI_BUF_TRANS_SELECT(0);
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3377 return DDI_BUF_TRANS_SELECT(1);
3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3379 return DDI_BUF_TRANS_SELECT(2);
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3381 return DDI_BUF_TRANS_SELECT(3);
3382
3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3384 return DDI_BUF_TRANS_SELECT(4);
3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3386 return DDI_BUF_TRANS_SELECT(5);
3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3388 return DDI_BUF_TRANS_SELECT(6);
3389
3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3391 return DDI_BUF_TRANS_SELECT(7);
3392 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3393 return DDI_BUF_TRANS_SELECT(8);
3394
3395 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3396 return DDI_BUF_TRANS_SELECT(9);
3397 default:
3398 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3399 "0x%x\n", signal_levels);
3400 return DDI_BUF_TRANS_SELECT(0);
3401 }
3402 }
3403
3404 /* Properly updates "DP" with the correct signal levels. */
3405 static void
3406 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3407 {
3408 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3409 enum port port = intel_dig_port->port;
3410 struct drm_device *dev = intel_dig_port->base.base.dev;
3411 uint32_t signal_levels, mask;
3412 uint8_t train_set = intel_dp->train_set[0];
3413
3414 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3415 signal_levels = intel_hsw_signal_levels(train_set);
3416 mask = DDI_BUF_EMP_MASK;
3417 } else if (IS_CHERRYVIEW(dev)) {
3418 signal_levels = intel_chv_signal_levels(intel_dp);
3419 mask = 0;
3420 } else if (IS_VALLEYVIEW(dev)) {
3421 signal_levels = intel_vlv_signal_levels(intel_dp);
3422 mask = 0;
3423 } else if (IS_GEN7(dev) && port == PORT_A) {
3424 signal_levels = intel_gen7_edp_signal_levels(train_set);
3425 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3426 } else if (IS_GEN6(dev) && port == PORT_A) {
3427 signal_levels = intel_gen6_edp_signal_levels(train_set);
3428 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3429 } else {
3430 signal_levels = intel_gen4_signal_levels(train_set);
3431 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3432 }
3433
3434 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3435
3436 *DP = (*DP & ~mask) | signal_levels;
3437 }
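/*
 * Worked example (illustrative): gen6 eDP on port A with train_set
 * requesting swing level 0 / pre-emphasis level 1 yields
 * EDP_LINK_TRAIN_400MV_3_5DB_SNB_B, merged into the port register as
 *
 *	*DP = (*DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) |
 *	      EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
 */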
3438
3439 static bool
3440 intel_dp_set_link_train(struct intel_dp *intel_dp,
3441 uint32_t *DP,
3442 uint8_t dp_train_pat)
3443 {
3444 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3445 struct drm_device *dev = intel_dig_port->base.base.dev;
3446 struct drm_i915_private *dev_priv = dev->dev_private;
3447 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3448 int ret, len;
3449
3450 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3451
3452 I915_WRITE(intel_dp->output_reg, *DP);
3453 POSTING_READ(intel_dp->output_reg);
3454
3455 buf[0] = dp_train_pat;
3456 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3457 DP_TRAINING_PATTERN_DISABLE) {
3458 /* don't write DP_TRAINING_LANEx_SET on disable */
3459 len = 1;
3460 } else {
3461 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3462 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3463 len = intel_dp->lane_count + 1;
3464 }
3465
3466 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3467 buf, len);
3468
3469 return ret == len;
3470 }
3471
3472 static bool
3473 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3474 uint8_t dp_train_pat)
3475 {
3476 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3477 intel_dp_set_signal_levels(intel_dp, DP);
3478 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3479 }
3480
3481 static bool
3482 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3483 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3484 {
3485 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3486 struct drm_device *dev = intel_dig_port->base.base.dev;
3487 struct drm_i915_private *dev_priv = dev->dev_private;
3488 int ret;
3489
3490 intel_get_adjust_train(intel_dp, link_status);
3491 intel_dp_set_signal_levels(intel_dp, DP);
3492
3493 I915_WRITE(intel_dp->output_reg, *DP);
3494 POSTING_READ(intel_dp->output_reg);
3495
3496 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3497 intel_dp->train_set, intel_dp->lane_count);
3498
3499 return ret == intel_dp->lane_count;
3500 }
3501
3502 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3503 {
3504 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3505 struct drm_device *dev = intel_dig_port->base.base.dev;
3506 struct drm_i915_private *dev_priv = dev->dev_private;
3507 enum port port = intel_dig_port->port;
3508 uint32_t val;
3509
3510 if (!HAS_DDI(dev))
3511 return;
3512
3513 val = I915_READ(DP_TP_CTL(port));
3514 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3515 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3516 I915_WRITE(DP_TP_CTL(port), val);
3517
3518 /*
3519 * On PORT_A we can have only eDP in SST mode. There the only reason
3520 * we need to set idle transmission mode is to work around a HW issue
3521 * where we enable the pipe while not in idle link-training mode.
3522 * In this case there is a requirement to wait for a minimum number of
3523 * idle patterns to be sent.
3524 */
3525 if (port == PORT_A)
3526 return;
3527
3528 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3529 1))
3530 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3531 }
3532
3533 /* Enable corresponding port and start training pattern 1 */
3534 void
3535 intel_dp_start_link_train(struct intel_dp *intel_dp)
3536 {
3537 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3538 struct drm_device *dev = encoder->dev;
3539 int i;
3540 uint8_t voltage;
3541 int voltage_tries, loop_tries;
3542 uint32_t DP = intel_dp->DP;
3543 uint8_t link_config[2];
3544
3545 if (HAS_DDI(dev))
3546 intel_ddi_prepare_link_retrain(encoder);
3547
3548 /* Write the link configuration data */
3549 link_config[0] = intel_dp->link_bw;
3550 link_config[1] = intel_dp->lane_count;
3551 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3552 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3553 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3554 if (intel_dp->num_sink_rates)
3555 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3556 &intel_dp->rate_select, 1);
3557
3558 link_config[0] = 0;
3559 link_config[1] = DP_SET_ANSI_8B10B;
3560 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3561
3562 DP |= DP_PORT_EN;
3563
3564 /* clock recovery */
3565 if (!intel_dp_reset_link_train(intel_dp, &DP,
3566 DP_TRAINING_PATTERN_1 |
3567 DP_LINK_SCRAMBLING_DISABLE)) {
3568 DRM_ERROR("failed to enable link training\n");
3569 return;
3570 }
3571
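/*
 * Clock recovery loop: keep adjusting voltage swing/pre-emphasis until
 * every active lane reports CR done. Give up after 5 attempts at the
 * same voltage, or after 5 full restarts once all lanes have hit
 * maximum swing.
 */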
3572 voltage = 0xff;
3573 voltage_tries = 0;
3574 loop_tries = 0;
3575 for (;;) {
3576 uint8_t link_status[DP_LINK_STATUS_SIZE];
3577
3578 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3579 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3580 DRM_ERROR("failed to get link status\n");
3581 break;
3582 }
3583
3584 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3585 DRM_DEBUG_KMS("clock recovery OK\n");
3586 break;
3587 }
3588
3589 /* Check to see if we've tried the max voltage */
3590 for (i = 0; i < intel_dp->lane_count; i++)
3591 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3592 break;
3593 if (i == intel_dp->lane_count) {
3594 ++loop_tries;
3595 if (loop_tries == 5) {
3596 DRM_ERROR("too many full retries, give up\n");
3597 break;
3598 }
3599 intel_dp_reset_link_train(intel_dp, &DP,
3600 DP_TRAINING_PATTERN_1 |
3601 DP_LINK_SCRAMBLING_DISABLE);
3602 voltage_tries = 0;
3603 continue;
3604 }
3605
3606 /* Check to see if we've tried the same voltage 5 times */
3607 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3608 ++voltage_tries;
3609 if (voltage_tries == 5) {
3610 DRM_ERROR("too many voltage retries, give up\n");
3611 break;
3612 }
3613 } else
3614 voltage_tries = 0;
3615 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3616
3617 /* Update training set as requested by target */
3618 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3619 DRM_ERROR("failed to update link training\n");
3620 break;
3621 }
3622 }
3623
3624 intel_dp->DP = DP;
3625 }
3626
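/*
 * Channel equalization phase of link training. Clock recovery is
 * re-checked on every pass and a full retrain is triggered if it was
 * lost; too many failed EQ attempts also fall back to clock recovery,
 * and the whole procedure is aborted after repeated CR failures.
 */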
3627 void
3628 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3629 {
3630 bool channel_eq = false;
3631 int tries, cr_tries;
3632 uint32_t DP = intel_dp->DP;
3633 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3634
3635 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3636 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3637 training_pattern = DP_TRAINING_PATTERN_3;
3638
3639 /* channel equalization */
3640 if (!intel_dp_set_link_train(intel_dp, &DP,
3641 training_pattern |
3642 DP_LINK_SCRAMBLING_DISABLE)) {
3643 DRM_ERROR("failed to start channel equalization\n");
3644 return;
3645 }
3646
3647 tries = 0;
3648 cr_tries = 0;
3649 channel_eq = false;
3650 for (;;) {
3651 uint8_t link_status[DP_LINK_STATUS_SIZE];
3652
3653 if (cr_tries > 5) {
3654 DRM_ERROR("failed to train DP, aborting\n");
3655 break;
3656 }
3657
3658 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3659 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3660 DRM_ERROR("failed to get link status\n");
3661 break;
3662 }
3663
3664 /* Make sure clock is still ok */
3665 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3666 intel_dp_start_link_train(intel_dp);
3667 intel_dp_set_link_train(intel_dp, &DP,
3668 training_pattern |
3669 DP_LINK_SCRAMBLING_DISABLE);
3670 cr_tries++;
3671 continue;
3672 }
3673
3674 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3675 channel_eq = true;
3676 break;
3677 }
3678
3679 /* Try 5 times, then try clock recovery if that fails */
3680 if (tries > 5) {
3681 intel_dp_start_link_train(intel_dp);
3682 intel_dp_set_link_train(intel_dp, &DP,
3683 training_pattern |
3684 DP_LINK_SCRAMBLING_DISABLE);
3685 tries = 0;
3686 cr_tries++;
3687 continue;
3688 }
3689
3690 /* Update training set as requested by target */
3691 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3692 DRM_ERROR("failed to update link training\n");
3693 break;
3694 }
3695 ++tries;
3696 }
3697
3698 intel_dp_set_idle_link_train(intel_dp);
3699
3700 intel_dp->DP = DP;
3701
3702 if (channel_eq)
3703 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3704
3705 }
3706
3707 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3708 {
3709 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3710 DP_TRAINING_PATTERN_DISABLE);
3711 }
3712
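/*
 * Shut the link down on non-DDI platforms: put the port into the idle
 * training pattern, apply the IBX transcoder select workaround, then
 * disable the port and wait out the panel power down delay.
 */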
3713 static void
3714 intel_dp_link_down(struct intel_dp *intel_dp)
3715 {
3716 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3717 enum port port = intel_dig_port->port;
3718 struct drm_device *dev = intel_dig_port->base.base.dev;
3719 struct drm_i915_private *dev_priv = dev->dev_private;
3720 uint32_t DP = intel_dp->DP;
3721
3722 if (WARN_ON(HAS_DDI(dev)))
3723 return;
3724
3725 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3726 return;
3727
3728 DRM_DEBUG_KMS("\n");
3729
3730 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3731 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3732 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3733 } else {
3734 if (IS_CHERRYVIEW(dev))
3735 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3736 else
3737 DP &= ~DP_LINK_TRAIN_MASK;
3738 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3739 }
3740 POSTING_READ(intel_dp->output_reg);
3741
3742 if (HAS_PCH_IBX(dev) &&
3743 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3744 /* Hardware workaround: leaving our transcoder select
3745 * set to transcoder B while it's off will prevent the
3746 * corresponding HDMI output on transcoder A.
3747 *
3748 * Combine this with another hardware workaround:
3749 * transcoder select bit can only be cleared while the
3750 * port is enabled.
3751 */
3752 DP &= ~DP_PIPEB_SELECT;
3753 I915_WRITE(intel_dp->output_reg, DP);
3754 POSTING_READ(intel_dp->output_reg);
3755 }
3756
3757 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3758 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3759 POSTING_READ(intel_dp->output_reg);
3760 msleep(intel_dp->panel_power_down_delay);
3761 }
3762
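/*
 * Read and cache the sink's DPCD receiver capabilities, plus the eDP
 * PSR caps, TPS3 support, eDP 1.4 intermediate link rates and any
 * downstream (branch device) port info the sink exposes.
 */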
3763 static bool
3764 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3765 {
3766 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3767 struct drm_device *dev = dig_port->base.base.dev;
3768 struct drm_i915_private *dev_priv = dev->dev_private;
3769 uint8_t rev;
3770
3771 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3772 sizeof(intel_dp->dpcd)) < 0)
3773 return false; /* aux transfer failed */
3774
3775 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3776
3777 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3778 return false; /* DPCD not present */
3779
3780 /* Check if the panel supports PSR */
3781 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3782 if (is_edp(intel_dp)) {
3783 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3784 intel_dp->psr_dpcd,
3785 sizeof(intel_dp->psr_dpcd));
3786 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3787 dev_priv->psr.sink_support = true;
3788 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3789 }
3790 }
3791
3792 /* Training Pattern 3 support, both source and sink */
3793 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3794 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3795 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3796 intel_dp->use_tps3 = true;
3797 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3798 } else
3799 intel_dp->use_tps3 = false;
3800
3801 /* Intermediate frequency support */
3802 if (is_edp(intel_dp) &&
3803 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3804 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3805 (rev >= 0x03)) { /* eDP v1.4 or higher */
3806 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3807 int i;
3808
3809 intel_dp_dpcd_read_wake(&intel_dp->aux,
3810 DP_SUPPORTED_LINK_RATES,
3811 sink_rates,
3812 sizeof(sink_rates));
3813
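/* Entries are in units of 200 kHz; a zero entry terminates the list. */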
3814 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3815 int val = le16_to_cpu(sink_rates[i]);
3816
3817 if (val == 0)
3818 break;
3819
3820 intel_dp->sink_rates[i] = val * 200;
3821 }
3822 intel_dp->num_sink_rates = i;
3823 }
3824
3825 intel_dp_print_rates(intel_dp);
3826
3827 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3828 DP_DWN_STRM_PORT_PRESENT))
3829 return true; /* native DP sink */
3830
3831 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3832 return true; /* no per-port downstream info */
3833
3834 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3835 intel_dp->downstream_ports,
3836 DP_MAX_DOWNSTREAM_PORTS) < 0)
3837 return false; /* downstream port status fetch failed */
3838
3839 return true;
3840 }
3841
3842 static void
3843 intel_dp_probe_oui(struct intel_dp *intel_dp)
3844 {
3845 u8 buf[3];
3846
3847 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3848 return;
3849
3850 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3851 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3852 buf[0], buf[1], buf[2]);
3853
3854 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3855 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3856 buf[0], buf[1], buf[2]);
3857 }
3858
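/*
 * Probe DP_MSTM_CAP on DPCD 1.2+ sinks and enable/disable the MST
 * topology manager accordingly. Returns true if the sink is driven in
 * MST mode.
 */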
3859 static bool
3860 intel_dp_probe_mst(struct intel_dp *intel_dp)
3861 {
3862 u8 buf[1];
3863
3864 if (!intel_dp->can_mst)
3865 return false;
3866
3867 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3868 return false;
3869
3870 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3871 if (buf[0] & DP_MST_CAP) {
3872 DRM_DEBUG_KMS("Sink is MST capable\n");
3873 intel_dp->is_mst = true;
3874 } else {
3875 DRM_DEBUG_KMS("Sink is not MST capable\n");
3876 intel_dp->is_mst = false;
3877 }
3878 }
3879
3880 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3881 return intel_dp->is_mst;
3882 }
3883
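/*
 * Ask the sink to compute a CRC over the frames it receives, for display
 * testing: start DP_TEST_SINK, wait for TEST_CRC_COUNT to change across
 * vblanks, read the 6 byte CRC and stop the test again.
 */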
3884 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3885 {
3886 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3887 struct drm_device *dev = intel_dig_port->base.base.dev;
3888 struct intel_crtc *intel_crtc =
3889 to_intel_crtc(intel_dig_port->base.base.crtc);
3890 u8 buf;
3891 int test_crc_count;
3892 int attempts = 6;
3893
3894 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3895 return -EIO;
3896
3897 if (!(buf & DP_TEST_CRC_SUPPORTED))
3898 return -ENOTTY;
3899
3900 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3901 return -EIO;
3902
3903 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3904 buf | DP_TEST_SINK_START) < 0)
3905 return -EIO;
3906
3907 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3908 return -EIO;
3909 test_crc_count = buf & DP_TEST_COUNT_MASK;
3910
3911 do {
3912 if (drm_dp_dpcd_readb(&intel_dp->aux,
3913 DP_TEST_SINK_MISC, &buf) < 0)
3914 return -EIO;
3915 intel_wait_for_vblank(dev, intel_crtc->pipe);
3916 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3917
3918 if (attempts == 0) {
3919 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3920 return -ETIMEDOUT;
3921 }
3922
3923 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3924 return -EIO;
3925
3926 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3927 return -EIO;
3928 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3929 buf & ~DP_TEST_SINK_START) < 0)
3930 return -EIO;
3931
3932 return 0;
3933 }
3934
3935 static bool
3936 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3937 {
3938 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3939 DP_DEVICE_SERVICE_IRQ_VECTOR,
3940 sink_irq_vector, 1) == 1;
3941 }
3942
3943 static bool
3944 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3945 {
3946 int ret;
3947
3948 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3949 DP_SINK_COUNT_ESI,
3950 sink_irq_vector, 14);
3951 if (ret != 14)
3952 return false;
3953
3954 return true;
3955 }
3956
3957 static void
3958 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3959 {
3960 /* NAK by default */
3961 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3962 }
3963
3964 static int
3965 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3966 {
3967 bool bret;
3968
3969 if (intel_dp->is_mst) {
3970 u8 esi[16] = { 0 };
3971 int ret = 0;
3972 int retry;
3973 bool handled;
3974 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3975 go_again:
3976 if (bret) {
3977
3978 /* check link status - esi[10] = 0x200c */
3979 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3980 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3981 intel_dp_start_link_train(intel_dp);
3982 intel_dp_complete_link_train(intel_dp);
3983 intel_dp_stop_link_train(intel_dp);
3984 }
3985
3986 DRM_DEBUG_KMS("got esi %3ph\n", esi);
3987 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3988
3989 if (handled) {
3990 for (retry = 0; retry < 3; retry++) {
3991 int wret;
3992 wret = drm_dp_dpcd_write(&intel_dp->aux,
3993 DP_SINK_COUNT_ESI+1,
3994 &esi[1], 3);
3995 if (wret == 3) {
3996 break;
3997 }
3998 }
3999
4000 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4001 if (bret) {
4002 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4003 goto go_again;
4004 }
4005 } else
4006 ret = 0;
4007
4008 return ret;
4009 } else {
4010 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4011 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4012 intel_dp->is_mst = false;
4013 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4014 /* send a hotplug event */
4015 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4016 }
4017 }
4018 return -EINVAL;
4019 }
4020
4021 /*
4022 * According to DP spec
4023 * 5.1.2:
4024 * 1. Read DPCD
4025 * 2. Configure link according to Receiver Capabilities
4026 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4027 * 4. Check link status on receipt of hot-plug interrupt
4028 */
4029 static void
4030 intel_dp_check_link_status(struct intel_dp *intel_dp)
4031 {
4032 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4033 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4034 u8 sink_irq_vector;
4035 u8 link_status[DP_LINK_STATUS_SIZE];
4036
4037 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4038
4039 if (!intel_encoder->connectors_active)
4040 return;
4041
4042 if (WARN_ON(!intel_encoder->base.crtc))
4043 return;
4044
4045 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4046 return;
4047
4048 /* Try to read receiver status if the link appears to be up */
4049 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4050 return;
4051 }
4052
4053 /* Now read the DPCD to see if it's actually running */
4054 if (!intel_dp_get_dpcd(intel_dp)) {
4055 return;
4056 }
4057
4058 /* Try to read the source of the interrupt */
4059 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4060 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4061 /* Clear interrupt source */
4062 drm_dp_dpcd_writeb(&intel_dp->aux,
4063 DP_DEVICE_SERVICE_IRQ_VECTOR,
4064 sink_irq_vector);
4065
4066 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4067 intel_dp_handle_test_request(intel_dp);
4068 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4069 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4070 }
4071
4072 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4073 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4074 intel_encoder->base.name);
4075 intel_dp_start_link_train(intel_dp);
4076 intel_dp_complete_link_train(intel_dp);
4077 intel_dp_stop_link_train(intel_dp);
4078 }
4079 }
4080
4081 /* XXX this is probably wrong for multiple downstream ports */
4082 static enum drm_connector_status
4083 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4084 {
4085 uint8_t *dpcd = intel_dp->dpcd;
4086 uint8_t type;
4087
4088 if (!intel_dp_get_dpcd(intel_dp))
4089 return connector_status_disconnected;
4090
4091 /* if there's no downstream port, we're done */
4092 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4093 return connector_status_connected;
4094
4095 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4096 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4097 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4098 uint8_t reg;
4099
4100 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4101 &reg, 1) < 0)
4102 return connector_status_unknown;
4103
4104 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4105 : connector_status_disconnected;
4106 }
4107
4108 /* If no HPD, poke DDC gently */
4109 if (drm_probe_ddc(&intel_dp->aux.ddc))
4110 return connector_status_connected;
4111
4112 /* Well we tried, say unknown for unreliable port types */
4113 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4114 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4115 if (type == DP_DS_PORT_TYPE_VGA ||
4116 type == DP_DS_PORT_TYPE_NON_EDID)
4117 return connector_status_unknown;
4118 } else {
4119 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4120 DP_DWN_STRM_PORT_TYPE_MASK;
4121 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4122 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4123 return connector_status_unknown;
4124 }
4125
4126 /* Anything else is out of spec, warn and ignore */
4127 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4128 return connector_status_disconnected;
4129 }
4130
4131 static enum drm_connector_status
4132 edp_detect(struct intel_dp *intel_dp)
4133 {
4134 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4135 enum drm_connector_status status;
4136
4137 status = intel_panel_detect(dev);
4138 if (status == connector_status_unknown)
4139 status = connector_status_connected;
4140
4141 return status;
4142 }
4143
4144 static enum drm_connector_status
4145 ironlake_dp_detect(struct intel_dp *intel_dp)
4146 {
4147 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4148 struct drm_i915_private *dev_priv = dev->dev_private;
4149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4150
4151 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4152 return connector_status_disconnected;
4153
4154 return intel_dp_detect_dpcd(intel_dp);
4155 }
4156
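/*
 * Sample the hotplug live status bit for the given port. Returns 1 if a
 * sink is detected, 0 if not, and -EINVAL for ports without a live
 * status bit in PORT_HOTPLUG_STAT.
 */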
4157 static int g4x_digital_port_connected(struct drm_device *dev,
4158 struct intel_digital_port *intel_dig_port)
4159 {
4160 struct drm_i915_private *dev_priv = dev->dev_private;
4161 uint32_t bit;
4162
4163 if (IS_VALLEYVIEW(dev)) {
4164 switch (intel_dig_port->port) {
4165 case PORT_B:
4166 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4167 break;
4168 case PORT_C:
4169 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4170 break;
4171 case PORT_D:
4172 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4173 break;
4174 default:
4175 return -EINVAL;
4176 }
4177 } else {
4178 switch (intel_dig_port->port) {
4179 case PORT_B:
4180 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4181 break;
4182 case PORT_C:
4183 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4184 break;
4185 case PORT_D:
4186 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4187 break;
4188 default:
4189 return -EINVAL;
4190 }
4191 }
4192
4193 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4194 return 0;
4195 return 1;
4196 }
4197
4198 static enum drm_connector_status
4199 g4x_dp_detect(struct intel_dp *intel_dp)
4200 {
4201 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4202 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4203 int ret;
4204
4205 /* Can't disconnect eDP, but you can close the lid... */
4206 if (is_edp(intel_dp)) {
4207 enum drm_connector_status status;
4208
4209 status = intel_panel_detect(dev);
4210 if (status == connector_status_unknown)
4211 status = connector_status_connected;
4212 return status;
4213 }
4214
4215 ret = g4x_digital_port_connected(dev, intel_dig_port);
4216 if (ret == -EINVAL)
4217 return connector_status_unknown;
4218 else if (ret == 0)
4219 return connector_status_disconnected;
4220
4221 return intel_dp_detect_dpcd(intel_dp);
4222 }
4223
4224 static struct edid *
4225 intel_dp_get_edid(struct intel_dp *intel_dp)
4226 {
4227 struct intel_connector *intel_connector = intel_dp->attached_connector;
4228
4229 /* use cached edid if we have one */
4230 if (intel_connector->edid) {
4231 /* invalid edid */
4232 if (IS_ERR(intel_connector->edid))
4233 return NULL;
4234
4235 return drm_edid_duplicate(intel_connector->edid);
4236 } else
4237 return drm_get_edid(&intel_connector->base,
4238 &intel_dp->aux.ddc);
4239 }
4240
4241 static void
4242 intel_dp_set_edid(struct intel_dp *intel_dp)
4243 {
4244 struct intel_connector *intel_connector = intel_dp->attached_connector;
4245 struct edid *edid;
4246
4247 edid = intel_dp_get_edid(intel_dp);
4248 intel_connector->detect_edid = edid;
4249
4250 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4251 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4252 else
4253 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4254 }
4255
4256 static void
4257 intel_dp_unset_edid(struct intel_dp *intel_dp)
4258 {
4259 struct intel_connector *intel_connector = intel_dp->attached_connector;
4260
4261 kfree(intel_connector->detect_edid);
4262 intel_connector->detect_edid = NULL;
4263
4264 intel_dp->has_audio = false;
4265 }
4266
4267 static enum intel_display_power_domain
4268 intel_dp_power_get(struct intel_dp *dp)
4269 {
4270 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4271 enum intel_display_power_domain power_domain;
4272
4273 power_domain = intel_display_port_power_domain(encoder);
4274 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4275
4276 return power_domain;
4277 }
4278
4279 static void
4280 intel_dp_power_put(struct intel_dp *dp,
4281 enum intel_display_power_domain power_domain)
4282 {
4283 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4284 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4285 }
4286
4287 static enum drm_connector_status
4288 intel_dp_detect(struct drm_connector *connector, bool force)
4289 {
4290 struct intel_dp *intel_dp = intel_attached_dp(connector);
4291 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4292 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4293 struct drm_device *dev = connector->dev;
4294 enum drm_connector_status status;
4295 enum intel_display_power_domain power_domain;
4296 bool ret;
4297
4298 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4299 connector->base.id, connector->name);
4300 intel_dp_unset_edid(intel_dp);
4301
4302 if (intel_dp->is_mst) {
4303 /* MST devices are disconnected from a monitor POV */
4304 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4305 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4306 return connector_status_disconnected;
4307 }
4308
4309 power_domain = intel_dp_power_get(intel_dp);
4310
4311 /* Can't disconnect eDP, but you can close the lid... */
4312 if (is_edp(intel_dp))
4313 status = edp_detect(intel_dp);
4314 else if (HAS_PCH_SPLIT(dev))
4315 status = ironlake_dp_detect(intel_dp);
4316 else
4317 status = g4x_dp_detect(intel_dp);
4318 if (status != connector_status_connected)
4319 goto out;
4320
4321 intel_dp_probe_oui(intel_dp);
4322
4323 ret = intel_dp_probe_mst(intel_dp);
4324 if (ret) {
4325 /* if we are in MST mode then this connector
4326  * won't appear connected or have anything with EDID on it */
4327 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4328 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4329 status = connector_status_disconnected;
4330 goto out;
4331 }
4332
4333 intel_dp_set_edid(intel_dp);
4334
4335 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4336 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4337 status = connector_status_connected;
4338
4339 out:
4340 intel_dp_power_put(intel_dp, power_domain);
4341 return status;
4342 }
4343
4344 static void
4345 intel_dp_force(struct drm_connector *connector)
4346 {
4347 struct intel_dp *intel_dp = intel_attached_dp(connector);
4348 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4349 enum intel_display_power_domain power_domain;
4350
4351 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4352 connector->base.id, connector->name);
4353 intel_dp_unset_edid(intel_dp);
4354
4355 if (connector->status != connector_status_connected)
4356 return;
4357
4358 power_domain = intel_dp_power_get(intel_dp);
4359
4360 intel_dp_set_edid(intel_dp);
4361
4362 intel_dp_power_put(intel_dp, power_domain);
4363
4364 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4365 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4366 }
4367
4368 static int intel_dp_get_modes(struct drm_connector *connector)
4369 {
4370 struct intel_connector *intel_connector = to_intel_connector(connector);
4371 struct edid *edid;
4372
4373 edid = intel_connector->detect_edid;
4374 if (edid) {
4375 int ret = intel_connector_update_modes(connector, edid);
4376 if (ret)
4377 return ret;
4378 }
4379
4380 /* if eDP has no EDID, fall back to fixed mode */
4381 if (is_edp(intel_attached_dp(connector)) &&
4382 intel_connector->panel.fixed_mode) {
4383 struct drm_display_mode *mode;
4384
4385 mode = drm_mode_duplicate(connector->dev,
4386 intel_connector->panel.fixed_mode);
4387 if (mode) {
4388 drm_mode_probed_add(connector, mode);
4389 return 1;
4390 }
4391 }
4392
4393 return 0;
4394 }
4395
4396 static bool
4397 intel_dp_detect_audio(struct drm_connector *connector)
4398 {
4399 bool has_audio = false;
4400 struct edid *edid;
4401
4402 edid = to_intel_connector(connector)->detect_edid;
4403 if (edid)
4404 has_audio = drm_detect_monitor_audio(edid);
4405
4406 return has_audio;
4407 }
4408
4409 static int
4410 intel_dp_set_property(struct drm_connector *connector,
4411 struct drm_property *property,
4412 uint64_t val)
4413 {
4414 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4415 struct intel_connector *intel_connector = to_intel_connector(connector);
4416 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4417 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4418 int ret;
4419
4420 ret = drm_object_property_set_value(&connector->base, property, val);
4421 if (ret)
4422 return ret;
4423
4424 if (property == dev_priv->force_audio_property) {
4425 int i = val;
4426 bool has_audio;
4427
4428 if (i == intel_dp->force_audio)
4429 return 0;
4430
4431 intel_dp->force_audio = i;
4432
4433 if (i == HDMI_AUDIO_AUTO)
4434 has_audio = intel_dp_detect_audio(connector);
4435 else
4436 has_audio = (i == HDMI_AUDIO_ON);
4437
4438 if (has_audio == intel_dp->has_audio)
4439 return 0;
4440
4441 intel_dp->has_audio = has_audio;
4442 goto done;
4443 }
4444
4445 if (property == dev_priv->broadcast_rgb_property) {
4446 bool old_auto = intel_dp->color_range_auto;
4447 uint32_t old_range = intel_dp->color_range;
4448
4449 switch (val) {
4450 case INTEL_BROADCAST_RGB_AUTO:
4451 intel_dp->color_range_auto = true;
4452 break;
4453 case INTEL_BROADCAST_RGB_FULL:
4454 intel_dp->color_range_auto = false;
4455 intel_dp->color_range = 0;
4456 break;
4457 case INTEL_BROADCAST_RGB_LIMITED:
4458 intel_dp->color_range_auto = false;
4459 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4460 break;
4461 default:
4462 return -EINVAL;
4463 }
4464
4465 if (old_auto == intel_dp->color_range_auto &&
4466 old_range == intel_dp->color_range)
4467 return 0;
4468
4469 goto done;
4470 }
4471
4472 if (is_edp(intel_dp) &&
4473 property == connector->dev->mode_config.scaling_mode_property) {
4474 if (val == DRM_MODE_SCALE_NONE) {
4475 DRM_DEBUG_KMS("scaling mode NONE not supported\n");
4476 return -EINVAL;
4477 }
4478
4479 if (intel_connector->panel.fitting_mode == val) {
4480 /* the eDP scaling property is not changed */
4481 return 0;
4482 }
4483 intel_connector->panel.fitting_mode = val;
4484
4485 goto done;
4486 }
4487
4488 return -EINVAL;
4489
4490 done:
4491 if (intel_encoder->base.crtc)
4492 intel_crtc_restore_mode(intel_encoder->base.crtc);
4493
4494 return 0;
4495 }
4496
4497 static void
4498 intel_dp_connector_destroy(struct drm_connector *connector)
4499 {
4500 struct intel_connector *intel_connector = to_intel_connector(connector);
4501
4502 kfree(intel_connector->detect_edid);
4503
4504 if (!IS_ERR_OR_NULL(intel_connector->edid))
4505 kfree(intel_connector->edid);
4506
4507 /* Can't call is_edp() since the encoder may have been destroyed
4508 * already. */
4509 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4510 intel_panel_fini(&intel_connector->panel);
4511
4512 drm_connector_cleanup(connector);
4513 kfree(connector);
4514 }
4515
4516 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4517 {
4518 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4519 struct intel_dp *intel_dp = &intel_dig_port->dp;
4520
4521 drm_dp_aux_unregister(&intel_dp->aux);
4522 intel_dp_mst_encoder_cleanup(intel_dig_port);
4523 if (is_edp(intel_dp)) {
4524 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4525 /*
4526 * vdd might still be enabled due to the delayed vdd off.
4527 * Make sure vdd is actually turned off here.
4528 */
4529 pps_lock(intel_dp);
4530 edp_panel_vdd_off_sync(intel_dp);
4531 pps_unlock(intel_dp);
4532
4533 if (intel_dp->edp_notifier.notifier_call) {
4534 unregister_reboot_notifier(&intel_dp->edp_notifier);
4535 intel_dp->edp_notifier.notifier_call = NULL;
4536 }
4537 }
4538 drm_encoder_cleanup(encoder);
4539 kfree(intel_dig_port);
4540 }
4541
4542 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4543 {
4544 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4545
4546 if (!is_edp(intel_dp))
4547 return;
4548
4549 /*
4550 * vdd might still be enabled due to the delayed vdd off.
4551 * Make sure vdd is actually turned off here.
4552 */
4553 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4554 pps_lock(intel_dp);
4555 edp_panel_vdd_off_sync(intel_dp);
4556 pps_unlock(intel_dp);
4557 }
4558
4559 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4560 {
4561 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4562 struct drm_device *dev = intel_dig_port->base.base.dev;
4563 struct drm_i915_private *dev_priv = dev->dev_private;
4564 enum intel_display_power_domain power_domain;
4565
4566 lockdep_assert_held(&dev_priv->pps_mutex);
4567
4568 if (!edp_have_panel_vdd(intel_dp))
4569 return;
4570
4571 /*
4572 * The VDD bit needs a power domain reference, so if the bit is
4573 * already enabled when we boot or resume, grab this reference and
4574 * schedule a vdd off, so we don't hold on to the reference
4575 * indefinitely.
4576 */
4577 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4578 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4579 intel_display_power_get(dev_priv, power_domain);
4580
4581 edp_panel_vdd_schedule_off(intel_dp);
4582 }
4583
4584 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4585 {
4586 struct intel_dp *intel_dp;
4587
4588 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4589 return;
4590
4591 intel_dp = enc_to_intel_dp(encoder);
4592
4593 pps_lock(intel_dp);
4594
4595 /*
4596 * Read out the current power sequencer assignment,
4597 * in case the BIOS did something with it.
4598 */
4599 if (IS_VALLEYVIEW(encoder->dev))
4600 vlv_initial_power_sequencer_setup(intel_dp);
4601
4602 intel_edp_panel_vdd_sanitize(intel_dp);
4603
4604 pps_unlock(intel_dp);
4605 }
4606
4607 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4608 .dpms = intel_connector_dpms,
4609 .detect = intel_dp_detect,
4610 .force = intel_dp_force,
4611 .fill_modes = drm_helper_probe_single_connector_modes,
4612 .set_property = intel_dp_set_property,
4613 .atomic_get_property = intel_connector_atomic_get_property,
4614 .destroy = intel_dp_connector_destroy,
4615 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4616 };
4617
4618 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4619 .get_modes = intel_dp_get_modes,
4620 .mode_valid = intel_dp_mode_valid,
4621 .best_encoder = intel_best_encoder,
4622 };
4623
4624 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4625 .reset = intel_dp_encoder_reset,
4626 .destroy = intel_dp_encoder_destroy,
4627 };
4628
4629 void
4630 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4631 {
4632 return;
4633 }
4634
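/*
 * Hotplug IRQ handler. Long pulses re-probe the sink (DPCD, OUI, MST);
 * short pulses service MST sideband messages or, for SST sinks,
 * re-check the link status so a retrain can be done if needed.
 */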
4635 enum irqreturn
4636 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4637 {
4638 struct intel_dp *intel_dp = &intel_dig_port->dp;
4639 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4640 struct drm_device *dev = intel_dig_port->base.base.dev;
4641 struct drm_i915_private *dev_priv = dev->dev_private;
4642 enum intel_display_power_domain power_domain;
4643 enum irqreturn ret = IRQ_NONE;
4644
4645 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4646 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4647
4648 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4649 /*
4650 * vdd off can generate a long pulse on eDP which
4651 * would require vdd on to handle it, and thus we
4652 * would end up in an endless cycle of
4653 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4654 */
4655 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4656 port_name(intel_dig_port->port));
4657 return IRQ_HANDLED;
4658 }
4659
4660 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4661 port_name(intel_dig_port->port),
4662 long_hpd ? "long" : "short");
4663
4664 power_domain = intel_display_port_power_domain(intel_encoder);
4665 intel_display_power_get(dev_priv, power_domain);
4666
4667 if (long_hpd) {
4668
4669 if (HAS_PCH_SPLIT(dev)) {
4670 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4671 goto mst_fail;
4672 } else {
4673 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4674 goto mst_fail;
4675 }
4676
4677 if (!intel_dp_get_dpcd(intel_dp)) {
4678 goto mst_fail;
4679 }
4680
4681 intel_dp_probe_oui(intel_dp);
4682
4683 if (!intel_dp_probe_mst(intel_dp))
4684 goto mst_fail;
4685
4686 } else {
4687 if (intel_dp->is_mst) {
4688 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4689 goto mst_fail;
4690 }
4691
4692 if (!intel_dp->is_mst) {
4693 /*
4694 * we'll check the link status via the normal hot plug path later -
4695 * but for short hpds we should check it now
4696 */
4697 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4698 intel_dp_check_link_status(intel_dp);
4699 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4700 }
4701 }
4702
4703 ret = IRQ_HANDLED;
4704
4705 goto put_power;
4706 mst_fail:
4707 /* if we were in MST mode and the device is not there, get out of MST mode */
4708 if (intel_dp->is_mst) {
4709 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4710 intel_dp->is_mst = false;
4711 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4712 }
4713 put_power:
4714 intel_display_power_put(dev_priv, power_domain);
4715
4716 return ret;
4717 }
4718
4719 /* Return which DP Port should be selected for Transcoder DP control */
4720 int
4721 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4722 {
4723 struct drm_device *dev = crtc->dev;
4724 struct intel_encoder *intel_encoder;
4725 struct intel_dp *intel_dp;
4726
4727 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4728 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4729
4730 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4731 intel_encoder->type == INTEL_OUTPUT_EDP)
4732 return intel_dp->output_reg;
4733 }
4734
4735 return -1;
4736 }
4737
4738 /* check the VBT to see whether the eDP is on DP-D port */
4739 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4740 {
4741 struct drm_i915_private *dev_priv = dev->dev_private;
4742 union child_device_config *p_child;
4743 int i;
4744 static const short port_mapping[] = {
4745 [PORT_B] = PORT_IDPB,
4746 [PORT_C] = PORT_IDPC,
4747 [PORT_D] = PORT_IDPD,
4748 };
4749
4750 if (port == PORT_A)
4751 return true;
4752
4753 if (!dev_priv->vbt.child_dev_num)
4754 return false;
4755
4756 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4757 p_child = dev_priv->vbt.child_dev + i;
4758
4759 if (p_child->common.dvo_port == port_mapping[port] &&
4760 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4761 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4762 return true;
4763 }
4764 return false;
4765 }
4766
4767 void
4768 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4769 {
4770 struct intel_connector *intel_connector = to_intel_connector(connector);
4771
4772 intel_attach_force_audio_property(connector);
4773 intel_attach_broadcast_rgb_property(connector);
4774 intel_dp->color_range_auto = true;
4775
4776 if (is_edp(intel_dp)) {
4777 drm_mode_create_scaling_mode_property(connector->dev);
4778 drm_object_attach_property(
4779 &connector->base,
4780 connector->dev->mode_config.scaling_mode_property,
4781 DRM_MODE_SCALE_ASPECT);
4782 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4783 }
4784 }
4785
4786 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4787 {
4788 intel_dp->last_power_cycle = jiffies;
4789 intel_dp->last_power_on = jiffies;
4790 intel_dp->last_backlight_off = jiffies;
4791 }
4792
4793 static void
4794 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4795 struct intel_dp *intel_dp)
4796 {
4797 struct drm_i915_private *dev_priv = dev->dev_private;
4798 struct edp_power_seq cur, vbt, spec,
4799 *final = &intel_dp->pps_delays;
4800 u32 pp_on, pp_off, pp_div, pp;
4801 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4802
4803 lockdep_assert_held(&dev_priv->pps_mutex);
4804
4805 /* already initialized? */
4806 if (final->t11_t12 != 0)
4807 return;
4808
4809 if (HAS_PCH_SPLIT(dev)) {
4810 pp_ctrl_reg = PCH_PP_CONTROL;
4811 pp_on_reg = PCH_PP_ON_DELAYS;
4812 pp_off_reg = PCH_PP_OFF_DELAYS;
4813 pp_div_reg = PCH_PP_DIVISOR;
4814 } else {
4815 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4816
4817 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4818 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4819 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4820 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4821 }
4822
4823 /* Workaround: Need to write PP_CONTROL with the unlock key as
4824 * the very first thing. */
4825 pp = ironlake_get_pp_control(intel_dp);
4826 I915_WRITE(pp_ctrl_reg, pp);
4827
4828 pp_on = I915_READ(pp_on_reg);
4829 pp_off = I915_READ(pp_off_reg);
4830 pp_div = I915_READ(pp_div_reg);
4831
4832 /* Pull timing values out of registers */
4833 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4834 PANEL_POWER_UP_DELAY_SHIFT;
4835
4836 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4837 PANEL_LIGHT_ON_DELAY_SHIFT;
4838
4839 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4840 PANEL_LIGHT_OFF_DELAY_SHIFT;
4841
4842 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4843 PANEL_POWER_DOWN_DELAY_SHIFT;
4844
4845 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4846 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4847
4848 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4849 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4850
4851 vbt = dev_priv->vbt.edp_pps;
4852
4853 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4854 * our hw here, which are all in 100usec. */
4855 spec.t1_t3 = 210 * 10;
4856 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4857 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4858 spec.t10 = 500 * 10;
4859 /* This one is special and actually in units of 100ms, but zero
4860 * based in the hw (so we need to add 100 ms). But the sw vbt
4861 * table multiplies it with 1000 to make it in units of 100usec,
4862 * too. */
4863 spec.t11_t12 = (510 + 100) * 10;
4864
4865 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4866 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4867
4868 /* Use the max of the register settings and vbt. If both are
4869 * unset, fall back to the spec limits. */
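/*
 * For example (values illustrative, not from any particular panel):
 * cur.t1_t3 = 0, vbt.t1_t3 = 400 yields final->t1_t3 = 400, while
 * cur.t1_t3 = vbt.t1_t3 = 0 falls back to the spec limit of 2100.
 */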
4870 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4871 spec.field : \
4872 max(cur.field, vbt.field))
4873 assign_final(t1_t3);
4874 assign_final(t8);
4875 assign_final(t9);
4876 assign_final(t10);
4877 assign_final(t11_t12);
4878 #undef assign_final
4879
4880 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4881 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4882 intel_dp->backlight_on_delay = get_delay(t8);
4883 intel_dp->backlight_off_delay = get_delay(t9);
4884 intel_dp->panel_power_down_delay = get_delay(t10);
4885 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4886 #undef get_delay
4887
4888 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4889 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4890 intel_dp->panel_power_cycle_delay);
4891
4892 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4893 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4894 }
4895
4896 static void
4897 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4898 struct intel_dp *intel_dp)
4899 {
4900 struct drm_i915_private *dev_priv = dev->dev_private;
4901 u32 pp_on, pp_off, pp_div, port_sel = 0;
4902 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4903 int pp_on_reg, pp_off_reg, pp_div_reg;
4904 enum port port = dp_to_dig_port(intel_dp)->port;
4905 const struct edp_power_seq *seq = &intel_dp->pps_delays;
4906
4907 lockdep_assert_held(&dev_priv->pps_mutex);
4908
4909 if (HAS_PCH_SPLIT(dev)) {
4910 pp_on_reg = PCH_PP_ON_DELAYS;
4911 pp_off_reg = PCH_PP_OFF_DELAYS;
4912 pp_div_reg = PCH_PP_DIVISOR;
4913 } else {
4914 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4915
4916 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4917 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4918 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4919 }
4920
4921 /*
4922 * And finally store the new values in the power sequencer. The
4923 * backlight delays are set to 1 because we do manual waits on them. For
4924 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4925 * we'll end up waiting for the backlight off delay twice: once when we
4926 * do the manual sleep, and once when we disable the panel and wait for
4927 * the PP_STATUS bit to become zero.
4928 */
4929 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4930 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4931 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4932 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4933 /* Compute the divisor for the pp clock, simply match the Bspec
4934 * formula. */
4935 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4936 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4937 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4938
4939 /* Haswell doesn't have any port selection bits for the panel
4940 * power sequencer any more. */
4941 if (IS_VALLEYVIEW(dev)) {
4942 port_sel = PANEL_PORT_SELECT_VLV(port);
4943 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4944 if (port == PORT_A)
4945 port_sel = PANEL_PORT_SELECT_DPA;
4946 else
4947 port_sel = PANEL_PORT_SELECT_DPD;
4948 }
4949
4950 pp_on |= port_sel;
4951
4952 I915_WRITE(pp_on_reg, pp_on);
4953 I915_WRITE(pp_off_reg, pp_off);
4954 I915_WRITE(pp_div_reg, pp_div);
4955
4956 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4957 I915_READ(pp_on_reg),
4958 I915_READ(pp_off_reg),
4959 I915_READ(pp_div_reg));
4960 }
4961
4962 /**
4963 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4964 * @dev: DRM device
4965 * @refresh_rate: RR to be programmed
4966 *
4967 * This function gets called when refresh rate (RR) has to be changed from
4968 * one frequency to another. Switches can be between high and low RR
4969 * supported by the panel or to any other RR based on media playback (in
4970 * this case, RR value needs to be passed from user space).
4971 *
4972 * The caller of this function needs to take a lock on dev_priv->drrs.
4973 */
4974 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4975 {
4976 struct drm_i915_private *dev_priv = dev->dev_private;
4977 struct intel_encoder *encoder;
4978 struct intel_digital_port *dig_port = NULL;
4979 struct intel_dp *intel_dp = dev_priv->drrs.dp;
4980 struct intel_crtc_state *config = NULL;
4981 struct intel_crtc *intel_crtc = NULL;
4982 u32 reg, val;
4983 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4984
4985 if (refresh_rate <= 0) {
4986 DRM_DEBUG_KMS("Refresh rate should be positive.\n");
4987 return;
4988 }
4989
4990 if (intel_dp == NULL) {
4991 DRM_DEBUG_KMS("DRRS not supported.\n");
4992 return;
4993 }
4994
4995 /*
4996 * FIXME: This needs proper synchronization with psr state for some
4997 * platforms that cannot have PSR and DRRS enabled at the same time.
4998 */
4999
5000 dig_port = dp_to_dig_port(intel_dp);
5001 encoder = &dig_port->base;
5002 intel_crtc = encoder->new_crtc;
5003
5004 if (!intel_crtc) {
5005 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5006 return;
5007 }
5008
5009 config = intel_crtc->config;
5010
5011 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5012 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5013 return;
5014 }
5015
5016 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5017 refresh_rate)
5018 index = DRRS_LOW_RR;
5019
5020 if (index == dev_priv->drrs.refresh_rate_type) {
5021 DRM_DEBUG_KMS(
5022 "DRRS requested for previously set RR...ignoring\n");
5023 return;
5024 }
5025
5026 if (!intel_crtc->active) {
5027 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5028 return;
5029 }
5030
5031 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5032 switch (index) {
5033 case DRRS_HIGH_RR:
5034 intel_dp_set_m_n(intel_crtc, M1_N1);
5035 break;
5036 case DRRS_LOW_RR:
5037 intel_dp_set_m_n(intel_crtc, M2_N2);
5038 break;
5039 case DRRS_MAX_RR:
5040 default:
5041 DRM_ERROR("Unsupported refresh rate type\n");
5042 }
5043 } else if (INTEL_INFO(dev)->gen > 6) {
5044 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5045 val = I915_READ(reg);
5046
5047 if (index > DRRS_HIGH_RR) {
5048 if (IS_VALLEYVIEW(dev))
5049 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5050 else
5051 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5052 } else {
5053 if (IS_VALLEYVIEW(dev))
5054 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5055 else
5056 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5057 }
5058 I915_WRITE(reg, val);
5059 }
5060
5061 dev_priv->drrs.refresh_rate_type = index;
5062
5063 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5064 }
5065
5066 /**
5067 * intel_edp_drrs_enable - init drrs struct if supported
5068 * @intel_dp: DP struct
5069 *
5070 * Initializes frontbuffer_bits and drrs.dp
5071 */
5072 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5073 {
5074 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5075 struct drm_i915_private *dev_priv = dev->dev_private;
5076 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5077 struct drm_crtc *crtc = dig_port->base.base.crtc;
5078 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5079
5080 if (!intel_crtc->config->has_drrs) {
5081 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5082 return;
5083 }
5084
5085 mutex_lock(&dev_priv->drrs.mutex);
5086 if (WARN_ON(dev_priv->drrs.dp)) {
5087 DRM_ERROR("DRRS already enabled\n");
5088 goto unlock;
5089 }
5090
5091 dev_priv->drrs.busy_frontbuffer_bits = 0;
5092
5093 dev_priv->drrs.dp = intel_dp;
5094
5095 unlock:
5096 mutex_unlock(&dev_priv->drrs.mutex);
5097 }
5098
5099 /**
5100 * intel_edp_drrs_disable - Disable DRRS
5101 * @intel_dp: DP struct
5102 *
5103 */
5104 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5105 {
5106 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5107 struct drm_i915_private *dev_priv = dev->dev_private;
5108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5109 struct drm_crtc *crtc = dig_port->base.base.crtc;
5110 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5111
5112 if (!intel_crtc->config->has_drrs)
5113 return;
5114
5115 mutex_lock(&dev_priv->drrs.mutex);
5116 if (!dev_priv->drrs.dp) {
5117 mutex_unlock(&dev_priv->drrs.mutex);
5118 return;
5119 }
5120
5121 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5122 intel_dp_set_drrs_state(dev_priv->dev,
5123 intel_dp->attached_connector->panel.
5124 fixed_mode->vrefresh);
5125
5126 dev_priv->drrs.dp = NULL;
5127 mutex_unlock(&dev_priv->drrs.mutex);
5128
5129 cancel_delayed_work_sync(&dev_priv->drrs.work);
5130 }
5131
5132 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5133 {
5134 struct drm_i915_private *dev_priv =
5135 container_of(work, typeof(*dev_priv), drrs.work.work);
5136 struct intel_dp *intel_dp;
5137
5138 mutex_lock(&dev_priv->drrs.mutex);
5139
5140 intel_dp = dev_priv->drrs.dp;
5141
5142 if (!intel_dp)
5143 goto unlock;
5144
5145 /*
5146 * The delayed work can race with an invalidate hence we need to
5147 * recheck.
5148 */
5149
5150 if (dev_priv->drrs.busy_frontbuffer_bits)
5151 goto unlock;
5152
5153 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5154 intel_dp_set_drrs_state(dev_priv->dev,
5155 intel_dp->attached_connector->panel.
5156 downclock_mode->vrefresh);
5157
5158 unlock:
5159
5160 mutex_unlock(&dev_priv->drrs.mutex);
5161 }
5162
5163 /**
5164 * intel_edp_drrs_invalidate - Invalidate DRRS
5165 * @dev: DRM device
5166 * @frontbuffer_bits: frontbuffer plane tracking bits
5167 *
5168 * When there is a disturbance on screen (due to cursor movement/time
5169 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5170 * high RR.
5171 *
5172 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5173 */
5174 void intel_edp_drrs_invalidate(struct drm_device *dev,
5175 unsigned frontbuffer_bits)
5176 {
5177 struct drm_i915_private *dev_priv = dev->dev_private;
5178 struct drm_crtc *crtc;
5179 enum pipe pipe;
5180
5181 if (!dev_priv->drrs.dp)
5182 return;
5183
5184 cancel_delayed_work_sync(&dev_priv->drrs.work);
5185
5186 mutex_lock(&dev_priv->drrs.mutex);
5187 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5188 pipe = to_intel_crtc(crtc)->pipe;
5189
5190 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5191 intel_dp_set_drrs_state(dev_priv->dev,
5192 dev_priv->drrs.dp->attached_connector->panel.
5193 fixed_mode->vrefresh);
5194 }
5195
5196 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5197
5198 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5199 mutex_unlock(&dev_priv->drrs.mutex);
5200 }
5201
5202 /**
5203 * intel_edp_drrs_flush - Flush DRRS
5204 * @dev: DRM device
5205 * @frontbuffer_bits: frontbuffer plane tracking bits
5206 *
5207 * When there is no movement on screen, DRRS work can be scheduled.
5208 * This DRRS work is responsible for setting relevant registers after a
5209 * timeout of 1 second.
5210 *
5211 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5212 */
5213 void intel_edp_drrs_flush(struct drm_device *dev,
5214 unsigned frontbuffer_bits)
5215 {
5216 struct drm_i915_private *dev_priv = dev->dev_private;
5217 struct drm_crtc *crtc;
5218 enum pipe pipe;
5219
5220 if (!dev_priv->drrs.dp)
5221 return;
5222
5223 cancel_delayed_work_sync(&dev_priv->drrs.work);
5224
5225 mutex_lock(&dev_priv->drrs.mutex);
5226 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5227 pipe = to_intel_crtc(crtc)->pipe;
5228 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5229
5230 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5231 !dev_priv->drrs.busy_frontbuffer_bits)
5232 schedule_delayed_work(&dev_priv->drrs.work,
5233 msecs_to_jiffies(1000));
5234 mutex_unlock(&dev_priv->drrs.mutex);
5235 }
5236
5237 /**
5238 * DOC: Display Refresh Rate Switching (DRRS)
5239 *
5240 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5241 * which enables switching between low and high refresh rates,
5242 * dynamically, based on the usage scenario. This feature is applicable
5243 * for internal panels.
5244 *
5245 * Indication that the panel supports DRRS is given by the panel EDID, which
5246 * would list multiple refresh rates for one resolution.
5247 *
5248 * DRRS is of 2 types - static and seamless.
5249 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5250 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5251 * Seamless DRRS involves changing RR without any visual effect to the user
5252 * and can be used during normal system usage. This is done by programming
5253 * certain registers.
5254 *
5255 * Support for static/seamless DRRS may be indicated in the VBT based on
5256 * inputs from the panel spec.
5257 *
5258 * DRRS saves power by switching to low RR based on usage scenarios.
5259 *
5260 * eDP DRRS:-
5261 * The implementation is based on frontbuffer tracking implementation.
5262 * When there is a disturbance on the screen triggered by user activity or a
5263 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5264 * When there is no movement on screen, after a timeout of 1 second, a switch
5265 * to low RR is made.
5266 * For integration with frontbuffer tracking code,
5267 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5268 *
5269 * DRRS can be further extended to support other internal panels and also
5270 * the scenario of video playback wherein RR is set based on the rate
5271 * requested by userspace.
5272 */
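
/*
 * A minimal sketch of the expected call flow from the frontbuffer
 * tracking code (frontbuffer_bits is the mask of dirtied planes; the
 * exact call sites live in the frontbuffer tracking implementation):
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... rendering to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */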
5273
5274 /**
5275 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5276 * @intel_connector: eDP connector
5277 * @fixed_mode: preferred mode of panel
5278 *
5279 * This function is called only once at driver load to initialize basic
5280 * DRRS stuff.
5281 *
5282 * Returns:
5283 * Downclock mode if panel supports it, else return NULL.
5284 * DRRS support is determined by the presence of downclock mode (apart
5285 * from VBT setting).
5286 */
5287 static struct drm_display_mode *
5288 intel_dp_drrs_init(struct intel_connector *intel_connector,
5289 struct drm_display_mode *fixed_mode)
5290 {
5291 struct drm_connector *connector = &intel_connector->base;
5292 struct drm_device *dev = connector->dev;
5293 struct drm_i915_private *dev_priv = dev->dev_private;
5294 struct drm_display_mode *downclock_mode = NULL;
5295
5296 if (INTEL_INFO(dev)->gen <= 6) {
5297 DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
5298 return NULL;
5299 }
5300
5301 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5302 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5303 return NULL;
5304 }
5305
5306 downclock_mode = intel_find_panel_downclock
5307 (dev, fixed_mode, connector);
5308
5309 if (!downclock_mode) {
5310 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
5311 return NULL;
5312 }
5313
5314 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5315
5316 mutex_init(&dev_priv->drrs.mutex);
5317
5318 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5319
5320 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5321 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5322 return downclock_mode;
5323 }
5324
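/*
 * One-time eDP connector setup: sanitize any VDD state left by the BIOS,
 * cache the DPCD and EDID, pick the fixed panel mode (EDID first, VBT as
 * fallback), set up DRRS and initialize the PPS registers and backlight.
 */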
5325 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5326 struct intel_connector *intel_connector)
5327 {
5328 struct drm_connector *connector = &intel_connector->base;
5329 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5330 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5331 struct drm_device *dev = intel_encoder->base.dev;
5332 struct drm_i915_private *dev_priv = dev->dev_private;
5333 struct drm_display_mode *fixed_mode = NULL;
5334 struct drm_display_mode *downclock_mode = NULL;
5335 bool has_dpcd;
5336 struct drm_display_mode *scan;
5337 struct edid *edid;
5338 enum pipe pipe = INVALID_PIPE;
5339
5340 if (!is_edp(intel_dp))
5341 return true;
5342
5343 pps_lock(intel_dp);
5344 intel_edp_panel_vdd_sanitize(intel_dp);
5345 pps_unlock(intel_dp);
5346
5347 /* Cache DPCD and EDID for eDP. */
5348 has_dpcd = intel_dp_get_dpcd(intel_dp);
5349
5350 if (has_dpcd) {
5351 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5352 dev_priv->no_aux_handshake =
5353 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5354 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5355 } else {
5356 /* if this fails, presume the device is a ghost */
5357 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5358 return false;
5359 }
5360
5361 /* We now know it's not a ghost, init power sequence regs. */
5362 pps_lock(intel_dp);
5363 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5364 pps_unlock(intel_dp);
5365
5366 mutex_lock(&dev->mode_config.mutex);
5367 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5368 if (edid) {
5369 if (drm_add_edid_modes(connector, edid)) {
5370 drm_mode_connector_update_edid_property(connector,
5371 edid);
5372 drm_edid_to_eld(connector, edid);
5373 } else {
5374 kfree(edid);
5375 edid = ERR_PTR(-EINVAL);
5376 }
5377 } else {
5378 edid = ERR_PTR(-ENOENT);
5379 }
5380 intel_connector->edid = edid;
5381
5382 /* prefer fixed mode from EDID if available */
5383 list_for_each_entry(scan, &connector->probed_modes, head) {
5384 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5385 fixed_mode = drm_mode_duplicate(dev, scan);
5386 downclock_mode = intel_dp_drrs_init(
5387 intel_connector, fixed_mode);
5388 break;
5389 }
5390 }
5391
5392 /* fallback to VBT if available for eDP */
5393 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5394 fixed_mode = drm_mode_duplicate(dev,
5395 dev_priv->vbt.lfp_lvds_vbt_mode);
5396 if (fixed_mode)
5397 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5398 }
5399 mutex_unlock(&dev->mode_config.mutex);
5400
5401 if (IS_VALLEYVIEW(dev)) {
5402 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5403 register_reboot_notifier(&intel_dp->edp_notifier);
5404
5405 /*
5406 * Figure out the current pipe for the initial backlight setup.
5407 * If the current pipe isn't valid, try the PPS pipe, and if that
5408 * fails just assume pipe A.
5409 */
5410 if (IS_CHERRYVIEW(dev))
5411 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5412 else
5413 pipe = PORT_TO_PIPE(intel_dp->DP);
5414
5415 if (pipe != PIPE_A && pipe != PIPE_B)
5416 pipe = intel_dp->pps_pipe;
5417
5418 if (pipe != PIPE_A && pipe != PIPE_B)
5419 pipe = PIPE_A;
5420
5421 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5422 pipe_name(pipe));
5423 }
5424
5425 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5426 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5427 intel_panel_setup_backlight(connector, pipe);
5428
5429 return true;
5430 }
5431
5432 bool
5433 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5434 struct intel_connector *intel_connector)
5435 {
5436 struct drm_connector *connector = &intel_connector->base;
5437 struct intel_dp *intel_dp = &intel_dig_port->dp;
5438 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5439 struct drm_device *dev = intel_encoder->base.dev;
5440 struct drm_i915_private *dev_priv = dev->dev_private;
5441 enum port port = intel_dig_port->port;
5442 int type;
5443
5444 intel_dp->pps_pipe = INVALID_PIPE;
5445
5446 /* intel_dp vfuncs */
5447 if (INTEL_INFO(dev)->gen >= 9)
5448 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5449 else if (IS_VALLEYVIEW(dev))
5450 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5451 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5452 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5453 else if (HAS_PCH_SPLIT(dev))
5454 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5455 else
5456 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5457
5458 if (INTEL_INFO(dev)->gen >= 9)
5459 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5460 else
5461 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5462
5463 /* Preserve the current hw state. */
5464 intel_dp->DP = I915_READ(intel_dp->output_reg);
5465 intel_dp->attached_connector = intel_connector;
5466
5467 if (intel_dp_is_edp(dev, port))
5468 type = DRM_MODE_CONNECTOR_eDP;
5469 else
5470 type = DRM_MODE_CONNECTOR_DisplayPort;
5471
5472 /*
5473 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5474 * for DP the encoder type can be set by the caller to
5475 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5476 */
5477 if (type == DRM_MODE_CONNECTOR_eDP)
5478 intel_encoder->type = INTEL_OUTPUT_EDP;
5479
5480 /* On VLV/CHV, eDP is only supported on ports B and C */
5481 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5482 port != PORT_B && port != PORT_C))
5483 return false;
5484
5485 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5486 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5487 port_name(port));
5488
5489 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5490 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5491
5492 connector->interlace_allowed = true;
5493 connector->doublescan_allowed = false;
5494
5495 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5496 edp_panel_vdd_work);
5497
5498 intel_connector_attach_encoder(intel_connector, intel_encoder);
5499 drm_connector_register(connector);
5500
5501 if (HAS_DDI(dev))
5502 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5503 else
5504 intel_connector->get_hw_state = intel_connector_get_hw_state;
5505 intel_connector->unregister = intel_dp_connector_unregister;
5506
5507 /* Set up the hotplug pin. */
5508 switch (port) {
5509 case PORT_A:
5510 intel_encoder->hpd_pin = HPD_PORT_A;
5511 break;
5512 case PORT_B:
5513 intel_encoder->hpd_pin = HPD_PORT_B;
5514 break;
5515 case PORT_C:
5516 intel_encoder->hpd_pin = HPD_PORT_C;
5517 break;
5518 case PORT_D:
5519 intel_encoder->hpd_pin = HPD_PORT_D;
5520 break;
5521 default:
5522 BUG();
5523 }
5524
5525 if (is_edp(intel_dp)) {
5526 pps_lock(intel_dp);
5527 intel_dp_init_panel_power_timestamps(intel_dp);
5528 if (IS_VALLEYVIEW(dev))
5529 vlv_initial_power_sequencer_setup(intel_dp);
5530 else
5531 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5532 pps_unlock(intel_dp);
5533 }
5534
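/* Register the AUX channel used for DPCD and EDID transactions. */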
5535 intel_dp_aux_init(intel_dp, intel_connector);
5536
5537 /* init MST on ports that can support it */
5538 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5539 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5540 intel_dp_mst_encoder_init(intel_dig_port,
5541 intel_connector->base.base.id);
5542 }
5543 }
5544
5545 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5546 drm_dp_aux_unregister(&intel_dp->aux);
5547 if (is_edp(intel_dp)) {
5548 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5549 /*
5550 * vdd might still be enabled due to the delayed vdd off.
5551 * Make sure vdd is actually turned off here.
5552 */
5553 pps_lock(intel_dp);
5554 edp_panel_vdd_off_sync(intel_dp);
5555 pps_unlock(intel_dp);
5556 }
5557 drm_connector_unregister(connector);
5558 drm_connector_cleanup(connector);
5559 return false;
5560 }
5561
5562 intel_dp_add_properties(intel_dp, connector);
5563
5564 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
5565 * written with 0xd. Failure to do so will result in spurious interrupts
5566 * being generated on the port when a cable is not attached.
5567 */
5568 if (IS_G4X(dev) && !IS_GM45(dev)) {
5569 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5570 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5571 }
5572
5573 return true;
5574 }
5575
5576 void
5577 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5578 {
5579 struct drm_i915_private *dev_priv = dev->dev_private;
5580 struct intel_digital_port *intel_dig_port;
5581 struct intel_encoder *intel_encoder;
5582 struct drm_encoder *encoder;
5583 struct intel_connector *intel_connector;
5584
5585 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5586 if (!intel_dig_port)
5587 return;
5588
5589 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5590 if (!intel_connector) {
5591 kfree(intel_dig_port);
5592 return;
5593 }
5594
5595 intel_encoder = &intel_dig_port->base;
5596 encoder = &intel_encoder->base;
5597
5598 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5599 DRM_MODE_ENCODER_TMDS);
5600
5601 intel_encoder->compute_config = intel_dp_compute_config;
5602 intel_encoder->disable = intel_disable_dp;
5603 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5604 intel_encoder->get_config = intel_dp_get_config;
5605 intel_encoder->suspend = intel_dp_encoder_suspend;
5606 if (IS_CHERRYVIEW(dev)) {
5607 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5608 intel_encoder->pre_enable = chv_pre_enable_dp;
5609 intel_encoder->enable = vlv_enable_dp;
5610 intel_encoder->post_disable = chv_post_disable_dp;
5611 } else if (IS_VALLEYVIEW(dev)) {
5612 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5613 intel_encoder->pre_enable = vlv_pre_enable_dp;
5614 intel_encoder->enable = vlv_enable_dp;
5615 intel_encoder->post_disable = vlv_post_disable_dp;
5616 } else {
5617 intel_encoder->pre_enable = g4x_pre_enable_dp;
5618 intel_encoder->enable = g4x_enable_dp;
5619 if (INTEL_INFO(dev)->gen >= 5)
5620 intel_encoder->post_disable = ilk_post_disable_dp;
5621 }
5622
5623 intel_dig_port->port = port;
5624 intel_dig_port->dp.output_reg = output_reg;
5625
5626 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
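/* On CHV, port D can only be driven by pipe C; ports B and C use pipes A and B. */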
5627 if (IS_CHERRYVIEW(dev)) {
5628 if (port == PORT_D)
5629 intel_encoder->crtc_mask = 1 << 2;
5630 else
5631 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5632 } else {
5633 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5634 }
5635 intel_encoder->cloneable = 0;
5636 intel_encoder->hot_plug = intel_dp_hot_plug;
5637
5638 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5639 dev_priv->hpd_irq_port[port] = intel_dig_port;
5640
5641 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5642 drm_encoder_cleanup(encoder);
5643 kfree(intel_dig_port);
5644 kfree(intel_connector);
5645 }
5646 }
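/*
 * Illustrative sketch, not part of this file: at driver load the output
 * setup code registers each DP port with a call of this shape,
 *
 *	intel_dp_init(dev, DP_B, PORT_B);
 *
 * passing the port's DP control register and its port enum. If connector
 * setup fails, the function frees its allocations and the port is skipped.
 */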
5647
5648 void intel_dp_mst_suspend(struct drm_device *dev)
5649 {
5650 struct drm_i915_private *dev_priv = dev->dev_private;
5651 int i;
5652
5653 /* disable MST */
5654 for (i = 0; i < I915_MAX_PORTS; i++) {
5655 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5656 if (!intel_dig_port)
5657 continue;
5658
5659 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5660 if (!intel_dig_port->dp.can_mst)
5661 continue;
5662 if (intel_dig_port->dp.is_mst)
5663 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5664 }
5665 }
5666 }
5667
5668 void intel_dp_mst_resume(struct drm_device *dev)
5669 {
5670 struct drm_i915_private *dev_priv = dev->dev_private;
5671 int i;
5672
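/* Restore MST state on any ports that support it. */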
5673 for (i = 0; i < I915_MAX_PORTS; i++) {
5674 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5675 if (!intel_dig_port)
5676 continue;
5677 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5678 int ret;
5679
5680 if (!intel_dig_port->dp.can_mst)
5681 continue;
5682
5683 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5684 if (ret != 0) {
5685 intel_dp_check_mst_status(&intel_dig_port->dp);
5686 }
5687 }
5688 }
5689 }