/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
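
/*
 * Worked example (illustrative only, not driver code): for the 1.62 GHz
 * entry above, m2_int = 32 and m2_fraction = 1677722, so
 *
 *	(32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a
 *
 * which matches the .m2 value in the table. Likewise for the 2.7/5.4 GHz
 * entries, (27 << 22) | 0 == 0x6c00000.
 */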

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * Returns true if a CPU or PCH DP output is attached to an eDP panel,
 * false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *	270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
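
/*
 * Worked example (illustrative only): a 1920x1080@60 mode has a pixel
 * clock of 148500 kHz. At 24 bpp it needs
 *
 *	intel_dp_link_required(148500, 24) == 356400
 *
 * decakilobits per second. Two lanes at HBR2 (540000) provide
 *
 *	intel_dp_max_data_rate(540000, 2) == 864000
 *
 * so that mode fits comfortably on a 2-lane HBR2 link.
 */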

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
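
/*
 * Illustrative example: the AUX data registers hold up to 4 bytes each,
 * MSB first. Packing the two bytes { 0x12, 0x34 } gives
 *
 *	intel_dp_pack_aux((const uint8_t []){ 0x12, 0x34 }, 2) == 0x12340000
 *
 * and intel_dp_unpack_aux(0x12340000, dst, 2) recovers { 0x12, 0x34 }.
 */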

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/*
 * Reboot notifier handler to shut down panel power and guarantee T12 timing.
 * Only applicable when the panel PM state is not tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* Writing 0x1F to PP_DIV_REG sets the max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on hrawclk, and the AUX clock should
	 * run at 2 MHz. So take the hrawclk value and divide by 2.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
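
/*
 * Illustrative example: with an 800 MHz FSB, intel_hrawclk() returns 200
 * (MHz), so the divider is 200 / 2 == 100, and 200 MHz / 100 == 2 MHz is
 * the resulting AUX bit clock base.
 */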

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled, and it's up to the upper
	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * DP AUX is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going
	 * into deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * a 400us delay is required for errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement, so skip to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
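
/*
 * Illustrative example of the header layout built above: a native AUX
 * read of 2 bytes from DPCD address 0x000 (DP_AUX_NATIVE_READ == 0x9)
 * produces
 *
 *	txbuf[] = { 0x90, 0x00, 0x00, 0x01 }
 *
 * i.e. the request code in the high nibble of byte 0, the 20-bit address
 * in the remaining nibble plus bytes 1-2, and size - 1 in byte 3.
 */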

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/*
	 * TBD: For the DP link rates 2.16 GHz and 4.32 GHz the VCO is 8640,
	 * which results in a CDCLK change. Need to handle the CDCLK change
	 * by disabling pipes and re-enabling them.
	 */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
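
/*
 * Illustrative note on the fallback above: the DP_LINK_BW_* codes are
 * 0x06, 0x0a and 0x14, so (code >> 3) + 1 yields 1, 2 or 3 -- exactly how
 * many entries of default_rates[] ({ 162000, 270000, 540000 }) the sink
 * supports.
 */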

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
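
/*
 * Illustrative example: both input arrays are sorted ascending, so a
 * single two-pointer pass suffices. Intersecting source rates
 * { 162000, 270000, 540000 } with sink rates { 162000, 270000 } stores
 * { 162000, 270000 } in common_rates and returns 2.
 */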

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}
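
/*
 * Note the sentinel trick used below: common rate arrays are
 * zero-initialized, so rate_to_index(0, rates) returns the number of
 * valid (non-zero) entries. E.g. for rates = { 162000, 270000, 540000,
 * 0, ... } it returns 3, and rates[3 - 1] is the highest common rate --
 * which is how intel_dp_max_link_rate() picks its result.
 */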

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift... */
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
			if (ret)
				return false; /* this function returns bool, not an errno */
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}
1789
1790 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1791 {
1792 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1793 struct drm_i915_private *dev_priv = dev->dev_private;
1794 struct intel_digital_port *intel_dig_port =
1795 dp_to_dig_port(intel_dp);
1796 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1797 enum intel_display_power_domain power_domain;
1798 u32 pp;
1799 u32 pp_stat_reg, pp_ctrl_reg;
1800
1801 lockdep_assert_held(&dev_priv->pps_mutex);
1802
1803 WARN_ON(intel_dp->want_panel_vdd);
1804
1805 if (!edp_have_panel_vdd(intel_dp))
1806 return;
1807
1808 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1809 port_name(intel_dig_port->port));
1810
1811 pp = ironlake_get_pp_control(intel_dp);
1812 pp &= ~EDP_FORCE_VDD;
1813
1814 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1815 pp_stat_reg = _pp_stat_reg(intel_dp);
1816
1817 I915_WRITE(pp_ctrl_reg, pp);
1818 POSTING_READ(pp_ctrl_reg);
1819
1820 /* Make sure sequencer is idle before allowing subsequent activity */
1821 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1822 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1823
1824 if ((pp & POWER_TARGET_ON) == 0)
1825 intel_dp->last_power_cycle = jiffies;
1826
1827 power_domain = intel_display_port_power_domain(intel_encoder);
1828 intel_display_power_put(dev_priv, power_domain);
1829 }
1830
1831 static void edp_panel_vdd_work(struct work_struct *__work)
1832 {
1833 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1834 struct intel_dp, panel_vdd_work);
1835
1836 pps_lock(intel_dp);
1837 if (!intel_dp->want_panel_vdd)
1838 edp_panel_vdd_off_sync(intel_dp);
1839 pps_unlock(intel_dp);
1840 }
1841
1842 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1843 {
1844 unsigned long delay;
1845
1846 /*
1847 * Queue the timer to fire a long time from now (relative to the power
1848 * down delay) to keep the panel power up across a sequence of
1849 * operations.
1850 */
1851 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1852 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1853 }
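/*
 * A worked example of the delay above, assuming a hypothetical but
 * plausible panel_power_cycle_delay of 500 ms: the off work is
 * scheduled ~2.5 s out, comfortably outlasting a burst of back-to-back
 * AUX transactions that each re-arm the timer.
 */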
1854
1855 /*
1856 * Must be paired with edp_panel_vdd_on().
1857 * Must hold pps_mutex around the whole on/off sequence.
1858 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1859 */
1860 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1861 {
1862 struct drm_i915_private *dev_priv =
1863 intel_dp_to_dev(intel_dp)->dev_private;
1864
1865 lockdep_assert_held(&dev_priv->pps_mutex);
1866
1867 if (!is_edp(intel_dp))
1868 return;
1869
1870 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1871 port_name(dp_to_dig_port(intel_dp)->port));
1872
1873 intel_dp->want_panel_vdd = false;
1874
1875 if (sync)
1876 edp_panel_vdd_off_sync(intel_dp);
1877 else
1878 edp_panel_vdd_schedule_off(intel_dp);
1879 }
1880
1881 static void edp_panel_on(struct intel_dp *intel_dp)
1882 {
1883 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1884 struct drm_i915_private *dev_priv = dev->dev_private;
1885 u32 pp;
1886 u32 pp_ctrl_reg;
1887
1888 lockdep_assert_held(&dev_priv->pps_mutex);
1889
1890 if (!is_edp(intel_dp))
1891 return;
1892
1893 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1894 port_name(dp_to_dig_port(intel_dp)->port));
1895
1896 if (WARN(edp_have_panel_power(intel_dp),
1897 "eDP port %c panel power already on\n",
1898 port_name(dp_to_dig_port(intel_dp)->port)))
1899 return;
1900
1901 wait_panel_power_cycle(intel_dp);
1902
1903 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1904 pp = ironlake_get_pp_control(intel_dp);
1905 if (IS_GEN5(dev)) {
1906 /* ILK workaround: disable reset around power sequence */
1907 pp &= ~PANEL_POWER_RESET;
1908 I915_WRITE(pp_ctrl_reg, pp);
1909 POSTING_READ(pp_ctrl_reg);
1910 }
1911
1912 pp |= POWER_TARGET_ON;
1913 if (!IS_GEN5(dev))
1914 pp |= PANEL_POWER_RESET;
1915
1916 I915_WRITE(pp_ctrl_reg, pp);
1917 POSTING_READ(pp_ctrl_reg);
1918
1919 wait_panel_on(intel_dp);
1920 intel_dp->last_power_on = jiffies;
1921
1922 if (IS_GEN5(dev)) {
1923 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1924 I915_WRITE(pp_ctrl_reg, pp);
1925 POSTING_READ(pp_ctrl_reg);
1926 }
1927 }
1928
1929 void intel_edp_panel_on(struct intel_dp *intel_dp)
1930 {
1931 if (!is_edp(intel_dp))
1932 return;
1933
1934 pps_lock(intel_dp);
1935 edp_panel_on(intel_dp);
1936 pps_unlock(intel_dp);
1937 }
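/*
 * Locking convention used throughout this block (a summary of the code
 * above, not new policy): the public intel_edp_* entry points take
 * pps_lock()/pps_unlock() themselves, while the static edp_* helpers
 * only assert that pps_mutex is already held.
 */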
1938
1940 static void edp_panel_off(struct intel_dp *intel_dp)
1941 {
1942 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1943 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1944 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1945 struct drm_i915_private *dev_priv = dev->dev_private;
1946 enum intel_display_power_domain power_domain;
1947 u32 pp;
1948 u32 pp_ctrl_reg;
1949
1950 lockdep_assert_held(&dev_priv->pps_mutex);
1951
1952 if (!is_edp(intel_dp))
1953 return;
1954
1955 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1956 port_name(dp_to_dig_port(intel_dp)->port));
1957
1958 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1959 port_name(dp_to_dig_port(intel_dp)->port));
1960
1961 pp = ironlake_get_pp_control(intel_dp);
1962 /* We need to switch off panel power _and_ force vdd, for otherwise some
1963 * panels get very unhappy and cease to work. */
1964 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1965 EDP_BLC_ENABLE);
1966
1967 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1968
1969 intel_dp->want_panel_vdd = false;
1970
1971 I915_WRITE(pp_ctrl_reg, pp);
1972 POSTING_READ(pp_ctrl_reg);
1973
1974 intel_dp->last_power_cycle = jiffies;
1975 wait_panel_off(intel_dp);
1976
1977 /* We got a reference when we enabled the VDD. */
1978 power_domain = intel_display_port_power_domain(intel_encoder);
1979 intel_display_power_put(dev_priv, power_domain);
1980 }
1981
1982 void intel_edp_panel_off(struct intel_dp *intel_dp)
1983 {
1984 if (!is_edp(intel_dp))
1985 return;
1986
1987 pps_lock(intel_dp);
1988 edp_panel_off(intel_dp);
1989 pps_unlock(intel_dp);
1990 }
1991
1992 /* Enable backlight in the panel power control. */
1993 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1994 {
1995 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1996 struct drm_device *dev = intel_dig_port->base.base.dev;
1997 struct drm_i915_private *dev_priv = dev->dev_private;
1998 u32 pp;
1999 u32 pp_ctrl_reg;
2000
2001 /*
2002 * If we enable the backlight right away following a panel power
2003 * on, we may see slight flicker as the panel syncs with the eDP
2004 * link. So delay a bit to make sure the image is solid before
2005 * allowing it to appear.
2006 */
2007 wait_backlight_on(intel_dp);
2008
2009 pps_lock(intel_dp);
2010
2011 pp = ironlake_get_pp_control(intel_dp);
2012 pp |= EDP_BLC_ENABLE;
2013
2014 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2015
2016 I915_WRITE(pp_ctrl_reg, pp);
2017 POSTING_READ(pp_ctrl_reg);
2018
2019 pps_unlock(intel_dp);
2020 }
2021
2022 /* Enable backlight PWM and backlight PP control. */
2023 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2024 {
2025 if (!is_edp(intel_dp))
2026 return;
2027
2028 DRM_DEBUG_KMS("\n");
2029
2030 intel_panel_enable_backlight(intel_dp->attached_connector);
2031 _intel_edp_backlight_on(intel_dp);
2032 }
2033
2034 /* Disable backlight in the panel power control. */
2035 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2036 {
2037 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2038 struct drm_i915_private *dev_priv = dev->dev_private;
2039 u32 pp;
2040 u32 pp_ctrl_reg;
2041
2042 if (!is_edp(intel_dp))
2043 return;
2044
2045 pps_lock(intel_dp);
2046
2047 pp = ironlake_get_pp_control(intel_dp);
2048 pp &= ~EDP_BLC_ENABLE;
2049
2050 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2051
2052 I915_WRITE(pp_ctrl_reg, pp);
2053 POSTING_READ(pp_ctrl_reg);
2054
2055 pps_unlock(intel_dp);
2056
2057 intel_dp->last_backlight_off = jiffies;
2058 edp_wait_backlight_off(intel_dp);
2059 }
2060
2061 /* Disable backlight PP control and backlight PWM. */
2062 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2063 {
2064 if (!is_edp(intel_dp))
2065 return;
2066
2067 DRM_DEBUG_KMS("\n");
2068
2069 _intel_edp_backlight_off(intel_dp);
2070 intel_panel_disable_backlight(intel_dp->attached_connector);
2071 }
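/*
 * Note how the two paths mirror each other: enable brings up the
 * backlight PWM before setting the PP control BLC bit, while disable
 * clears the PP control bit first and tears down the PWM last.
 */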
2072
2073 /*
2074 * Hook for controlling the panel power control backlight through the bl_power
2075 * sysfs attribute. Take care to handle multiple calls.
2076 */
2077 static void intel_edp_backlight_power(struct intel_connector *connector,
2078 bool enable)
2079 {
2080 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2081 bool is_enabled;
2082
2083 pps_lock(intel_dp);
2084 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2085 pps_unlock(intel_dp);
2086
2087 if (is_enabled == enable)
2088 return;
2089
2090 DRM_DEBUG_KMS("panel power control backlight %s\n",
2091 enable ? "enable" : "disable");
2092
2093 if (enable)
2094 _intel_edp_backlight_on(intel_dp);
2095 else
2096 _intel_edp_backlight_off(intel_dp);
2097 }
2098
2099 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2100 {
2101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2102 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2103 struct drm_device *dev = crtc->dev;
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 u32 dpa_ctl;
2106
2107 assert_pipe_disabled(dev_priv,
2108 to_intel_crtc(crtc)->pipe);
2109
2110 DRM_DEBUG_KMS("\n");
2111 dpa_ctl = I915_READ(DP_A);
2112 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2113 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2114
2115 /* We don't adjust intel_dp->DP while tearing down the link, to
2116 * facilitate link retraining (e.g. after hotplug). Hence clear all
2117 * enable bits here to ensure that we don't enable too much. */
2118 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2119 intel_dp->DP |= DP_PLL_ENABLE;
2120 I915_WRITE(DP_A, intel_dp->DP);
2121 POSTING_READ(DP_A);
2122 udelay(200);
2123 }
2124
2125 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2126 {
2127 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2128 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2129 struct drm_device *dev = crtc->dev;
2130 struct drm_i915_private *dev_priv = dev->dev_private;
2131 u32 dpa_ctl;
2132
2133 assert_pipe_disabled(dev_priv,
2134 to_intel_crtc(crtc)->pipe);
2135
2136 dpa_ctl = I915_READ(DP_A);
2137 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2138 "dp pll off, should be on\n");
2139 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2140
2141 /* We can't rely on the value tracked for the DP register in
2142 * intel_dp->DP because link_down must not change that (otherwise link
2143 * re-training will fail). */
2144 dpa_ctl &= ~DP_PLL_ENABLE;
2145 I915_WRITE(DP_A, dpa_ctl);
2146 POSTING_READ(DP_A);
2147 udelay(200);
2148 }
2149
2150 /* If the sink supports it, try to set the power state appropriately */
2151 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2152 {
2153 int ret, i;
2154
2155 /* Should have a valid DPCD by this point */
2156 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2157 return;
2158
2159 if (mode != DRM_MODE_DPMS_ON) {
2160 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2161 DP_SET_POWER_D3);
2162 } else {
2163 /*
2164 * When turning on, we retry the write a few times, since the sink
2165 * is supposed to wake up within 1 ms.
2166 */
2167 for (i = 0; i < 3; i++) {
2168 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2169 DP_SET_POWER_D0);
2170 if (ret == 1)
2171 break;
2172 msleep(1);
2173 }
2174 }
2175
2176 if (ret != 1)
2177 DRM_DEBUG_KMS("failed to %s sink power state\n",
2178 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2179 }
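/*
 * Note on the ret == 1 checks above: drm_dp_dpcd_writeb() returns the
 * number of bytes transferred on success (here always 1) or a negative
 * error code, so anything other than 1 means the write didn't land.
 */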
2180
2181 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2182 enum pipe *pipe)
2183 {
2184 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2185 enum port port = dp_to_dig_port(intel_dp)->port;
2186 struct drm_device *dev = encoder->base.dev;
2187 struct drm_i915_private *dev_priv = dev->dev_private;
2188 enum intel_display_power_domain power_domain;
2189 u32 tmp;
2190
2191 power_domain = intel_display_port_power_domain(encoder);
2192 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2193 return false;
2194
2195 tmp = I915_READ(intel_dp->output_reg);
2196
2197 if (!(tmp & DP_PORT_EN))
2198 return false;
2199
2200 if (IS_GEN7(dev) && port == PORT_A) {
2201 *pipe = PORT_TO_PIPE_CPT(tmp);
2202 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2203 enum pipe p;
2204
2205 for_each_pipe(dev_priv, p) {
2206 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2207 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2208 *pipe = p;
2209 return true;
2210 }
2211 }
2212
2213 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2214 intel_dp->output_reg);
2215 } else if (IS_CHERRYVIEW(dev)) {
2216 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2217 } else {
2218 *pipe = PORT_TO_PIPE(tmp);
2219 }
2220
2221 return true;
2222 }
2223
2224 static void intel_dp_get_config(struct intel_encoder *encoder,
2225 struct intel_crtc_state *pipe_config)
2226 {
2227 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2228 u32 tmp, flags = 0;
2229 struct drm_device *dev = encoder->base.dev;
2230 struct drm_i915_private *dev_priv = dev->dev_private;
2231 enum port port = dp_to_dig_port(intel_dp)->port;
2232 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2233 int dotclock;
2234
2235 tmp = I915_READ(intel_dp->output_reg);
2236
2237 pipe_config->has_audio = (tmp & DP_AUDIO_OUTPUT_ENABLE) && port != PORT_A;
2238
2239 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2240 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2241 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2242 flags |= DRM_MODE_FLAG_PHSYNC;
2243 else
2244 flags |= DRM_MODE_FLAG_NHSYNC;
2245
2246 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2247 flags |= DRM_MODE_FLAG_PVSYNC;
2248 else
2249 flags |= DRM_MODE_FLAG_NVSYNC;
2250 } else {
2251 if (tmp & DP_SYNC_HS_HIGH)
2252 flags |= DRM_MODE_FLAG_PHSYNC;
2253 else
2254 flags |= DRM_MODE_FLAG_NHSYNC;
2255
2256 if (tmp & DP_SYNC_VS_HIGH)
2257 flags |= DRM_MODE_FLAG_PVSYNC;
2258 else
2259 flags |= DRM_MODE_FLAG_NVSYNC;
2260 }
2261
2262 pipe_config->base.adjusted_mode.flags |= flags;
2263
2264 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2265 tmp & DP_COLOR_RANGE_16_235)
2266 pipe_config->limited_color_range = true;
2267
2268 pipe_config->has_dp_encoder = true;
2269
2270 intel_dp_get_m_n(crtc, pipe_config);
2271
2272 if (port == PORT_A) {
2273 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2274 pipe_config->port_clock = 162000;
2275 else
2276 pipe_config->port_clock = 270000;
2277 }
2278
2279 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2280 &pipe_config->dp_m_n);
2281
2282 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2283 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2284
2285 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2286
2287 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2288 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2289 /*
2290 * This is a big fat ugly hack.
2291 *
2292 * Some machines in UEFI boot mode provide us with a VBT that has 18
2293 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2294 * unknown we fail to light up. Yet the same BIOS boots up with
2295 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2296 * max, not what it tells us to use.
2297 *
2298 * Note: This will still be broken if the eDP panel is not lit
2299 * up by the BIOS, and thus we can't get the mode at module
2300 * load.
2301 */
2302 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2303 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2304 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2305 }
2306 }
2307
2308 static void intel_disable_dp(struct intel_encoder *encoder)
2309 {
2310 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2311 struct drm_device *dev = encoder->base.dev;
2312 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2313
2314 if (crtc->config->has_audio)
2315 intel_audio_codec_disable(encoder);
2316
2317 if (HAS_PSR(dev) && !HAS_DDI(dev))
2318 intel_psr_disable(intel_dp);
2319
2320 /* Make sure the panel is off before trying to change the mode. But also
2321 * ensure that we have vdd while we switch off the panel. */
2322 intel_edp_panel_vdd_on(intel_dp);
2323 intel_edp_backlight_off(intel_dp);
2324 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2325 intel_edp_panel_off(intel_dp);
2326
2327 /* disable the port before the pipe on g4x */
2328 if (INTEL_INFO(dev)->gen < 5)
2329 intel_dp_link_down(intel_dp);
2330 }
2331
2332 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2333 {
2334 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2335 enum port port = dp_to_dig_port(intel_dp)->port;
2336
2337 intel_dp_link_down(intel_dp);
2338 if (port == PORT_A)
2339 ironlake_edp_pll_off(intel_dp);
2340 }
2341
2342 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2343 {
2344 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2345
2346 intel_dp_link_down(intel_dp);
2347 }
2348
2349 static void chv_post_disable_dp(struct intel_encoder *encoder)
2350 {
2351 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2352 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2353 struct drm_device *dev = encoder->base.dev;
2354 struct drm_i915_private *dev_priv = dev->dev_private;
2355 struct intel_crtc *intel_crtc =
2356 to_intel_crtc(encoder->base.crtc);
2357 enum dpio_channel ch = vlv_dport_to_channel(dport);
2358 enum pipe pipe = intel_crtc->pipe;
2359 u32 val;
2360
2361 intel_dp_link_down(intel_dp);
2362
2363 mutex_lock(&dev_priv->sb_lock);
2364
2365 /* Propagate soft reset to data lane reset */
2366 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2367 val |= CHV_PCS_REQ_SOFTRESET_EN;
2368 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2369
2370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2371 val |= CHV_PCS_REQ_SOFTRESET_EN;
2372 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2373
2374 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2375 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2376 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2377
2378 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2379 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2380 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2381
2382 mutex_unlock(&dev_priv->sb_lock);
2383 }
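/*
 * The DPIO sequence above is the mirror image of the lane reset
 * deassert done at pre-enable time: first let the soft reset propagate
 * to the data lane resets (REQ_SOFTRESET_EN), then clear the per-lane
 * reset deassert bits to put the lanes back into reset.
 */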
2384
2385 static void
2386 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2387 uint32_t *DP,
2388 uint8_t dp_train_pat)
2389 {
2390 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2391 struct drm_device *dev = intel_dig_port->base.base.dev;
2392 struct drm_i915_private *dev_priv = dev->dev_private;
2393 enum port port = intel_dig_port->port;
2394
2395 if (HAS_DDI(dev)) {
2396 uint32_t temp = I915_READ(DP_TP_CTL(port));
2397
2398 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2399 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2400 else
2401 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2402
2403 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2404 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2405 case DP_TRAINING_PATTERN_DISABLE:
2406 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2407
2408 break;
2409 case DP_TRAINING_PATTERN_1:
2410 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2411 break;
2412 case DP_TRAINING_PATTERN_2:
2413 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2414 break;
2415 case DP_TRAINING_PATTERN_3:
2416 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2417 break;
2418 }
2419 I915_WRITE(DP_TP_CTL(port), temp);
2420
2421 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2422 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2423 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2424
2425 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2426 case DP_TRAINING_PATTERN_DISABLE:
2427 *DP |= DP_LINK_TRAIN_OFF_CPT;
2428 break;
2429 case DP_TRAINING_PATTERN_1:
2430 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2431 break;
2432 case DP_TRAINING_PATTERN_2:
2433 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2434 break;
2435 case DP_TRAINING_PATTERN_3:
2436 DRM_ERROR("DP training pattern 3 not supported\n");
2437 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2438 break;
2439 }
2440
2441 } else {
2442 if (IS_CHERRYVIEW(dev))
2443 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2444 else
2445 *DP &= ~DP_LINK_TRAIN_MASK;
2446
2447 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2448 case DP_TRAINING_PATTERN_DISABLE:
2449 *DP |= DP_LINK_TRAIN_OFF;
2450 break;
2451 case DP_TRAINING_PATTERN_1:
2452 *DP |= DP_LINK_TRAIN_PAT_1;
2453 break;
2454 case DP_TRAINING_PATTERN_2:
2455 *DP |= DP_LINK_TRAIN_PAT_2;
2456 break;
2457 case DP_TRAINING_PATTERN_3:
2458 if (IS_CHERRYVIEW(dev)) {
2459 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2460 } else {
2461 DRM_ERROR("DP training pattern 3 not supported\n");
2462 *DP |= DP_LINK_TRAIN_PAT_2;
2463 }
2464 break;
2465 }
2466 }
2467 }
2468
2469 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2470 {
2471 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473
2474 /* enable with pattern 1 (as per spec) */
2475 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2476 DP_TRAINING_PATTERN_1);
2477
2478 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2479 POSTING_READ(intel_dp->output_reg);
2480
2481 /*
2482 * Magic for VLV/CHV. We _must_ first set up the register
2483 * without actually enabling the port, and then do another
2484 * write to enable the port. Otherwise link training will
2485 * fail when the power sequencer is freshly used for this port.
2486 */
2487 intel_dp->DP |= DP_PORT_EN;
2488
2489 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2490 POSTING_READ(intel_dp->output_reg);
2491 }
2492
2493 static void intel_enable_dp(struct intel_encoder *encoder)
2494 {
2495 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2496 struct drm_device *dev = encoder->base.dev;
2497 struct drm_i915_private *dev_priv = dev->dev_private;
2498 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2499 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2500 unsigned int lane_mask = 0x0;
2501
2502 if (WARN_ON(dp_reg & DP_PORT_EN))
2503 return;
2504
2505 pps_lock(intel_dp);
2506
2507 if (IS_VALLEYVIEW(dev))
2508 vlv_init_panel_power_sequencer(intel_dp);
2509
2510 intel_dp_enable_port(intel_dp);
2511
2512 edp_panel_vdd_on(intel_dp);
2513 edp_panel_on(intel_dp);
2514 edp_panel_vdd_off(intel_dp, true);
2515
2516 pps_unlock(intel_dp);
2517
2518 if (IS_VALLEYVIEW(dev))
2519 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2520 lane_mask);
2521
2522 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2523 intel_dp_start_link_train(intel_dp);
2524 intel_dp_complete_link_train(intel_dp);
2525 intel_dp_stop_link_train(intel_dp);
2526
2527 if (crtc->config->has_audio) {
2528 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2529 pipe_name(crtc->pipe));
2530 intel_audio_codec_enable(encoder);
2531 }
2532 }
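/*
 * Bring-up order used above: enable the port with training pattern 1,
 * then (for eDP) panel power under VDD protection, then wake the sink
 * to D0, run the full link training sequence, and only then enable
 * audio. The backlight is handled later by the platform ->enable()
 * hooks (see g4x_enable_dp()/vlv_enable_dp() below).
 */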
2533
2534 static void g4x_enable_dp(struct intel_encoder *encoder)
2535 {
2536 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2537
2538 intel_enable_dp(encoder);
2539 intel_edp_backlight_on(intel_dp);
2540 }
2541
2542 static void vlv_enable_dp(struct intel_encoder *encoder)
2543 {
2544 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2545
2546 intel_edp_backlight_on(intel_dp);
2547 intel_psr_enable(intel_dp);
2548 }
2549
2550 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2551 {
2552 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2553 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2554
2555 intel_dp_prepare(encoder);
2556
2557 /* Only ilk+ has port A */
2558 if (dport->port == PORT_A) {
2559 ironlake_set_pll_cpu_edp(intel_dp);
2560 ironlake_edp_pll_on(intel_dp);
2561 }
2562 }
2563
2564 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2565 {
2566 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2567 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2568 enum pipe pipe = intel_dp->pps_pipe;
2569 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2570
2571 edp_panel_vdd_off_sync(intel_dp);
2572
2573 /*
2574 * VLV seems to get confused when multiple power sequencers
2575 * have the same port selected (even if only one has power/vdd
2576 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2577 * CHV, on the other hand, doesn't seem to mind having the same port
2578 * selected in multiple power sequencers, but let's always clear the
2579 * port select when logically disconnecting a power sequencer
2580 * from a port.
2581 */
2582 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2583 pipe_name(pipe), port_name(intel_dig_port->port));
2584 I915_WRITE(pp_on_reg, 0);
2585 POSTING_READ(pp_on_reg);
2586
2587 intel_dp->pps_pipe = INVALID_PIPE;
2588 }
2589
2590 static void vlv_steal_power_sequencer(struct drm_device *dev,
2591 enum pipe pipe)
2592 {
2593 struct drm_i915_private *dev_priv = dev->dev_private;
2594 struct intel_encoder *encoder;
2595
2596 lockdep_assert_held(&dev_priv->pps_mutex);
2597
2598 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2599 return;
2600
2601 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2602 base.head) {
2603 struct intel_dp *intel_dp;
2604 enum port port;
2605
2606 if (encoder->type != INTEL_OUTPUT_EDP)
2607 continue;
2608
2609 intel_dp = enc_to_intel_dp(&encoder->base);
2610 port = dp_to_dig_port(intel_dp)->port;
2611
2612 if (intel_dp->pps_pipe != pipe)
2613 continue;
2614
2615 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2616 pipe_name(pipe), port_name(port));
2617
2618 WARN(encoder->connectors_active,
2619 "stealing pipe %c power sequencer from active eDP port %c\n",
2620 pipe_name(pipe), port_name(port));
2621
2622 /* make sure vdd is off before we steal it */
2623 vlv_detach_power_sequencer(intel_dp);
2624 }
2625 }
2626
2627 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2628 {
2629 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2630 struct intel_encoder *encoder = &intel_dig_port->base;
2631 struct drm_device *dev = encoder->base.dev;
2632 struct drm_i915_private *dev_priv = dev->dev_private;
2633 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2634
2635 lockdep_assert_held(&dev_priv->pps_mutex);
2636
2637 if (!is_edp(intel_dp))
2638 return;
2639
2640 if (intel_dp->pps_pipe == crtc->pipe)
2641 return;
2642
2643 /*
2644 * If another power sequencer was being used on this
2645 * port previously make sure to turn off vdd there while
2646 * we still have control of it.
2647 */
2648 if (intel_dp->pps_pipe != INVALID_PIPE)
2649 vlv_detach_power_sequencer(intel_dp);
2650
2651 /*
2652 * We may be stealing the power
2653 * sequencer from another port.
2654 */
2655 vlv_steal_power_sequencer(dev, crtc->pipe);
2656
2657 /* now it's all ours */
2658 intel_dp->pps_pipe = crtc->pipe;
2659
2660 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2661 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2662
2663 /* init power sequencer on this pipe and port */
2664 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2665 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2666 }
2667
2668 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2669 {
2670 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2671 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2672 struct drm_device *dev = encoder->base.dev;
2673 struct drm_i915_private *dev_priv = dev->dev_private;
2674 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2675 enum dpio_channel port = vlv_dport_to_channel(dport);
2676 int pipe = intel_crtc->pipe;
2677 u32 val;
2678
2679 mutex_lock(&dev_priv->sb_lock);
2680
2681 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
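/* NB: the value just read is discarded (val is reset to 0 below);
 * presumably only the writes that follow matter here. */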
2682 val = 0;
2683 if (pipe)
2684 val |= (1<<21);
2685 else
2686 val &= ~(1<<21);
2687 val |= 0x001000c4;
2688 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2690 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2691
2692 mutex_unlock(&dev_priv->sb_lock);
2693
2694 intel_enable_dp(encoder);
2695 }
2696
2697 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2698 {
2699 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2700 struct drm_device *dev = encoder->base.dev;
2701 struct drm_i915_private *dev_priv = dev->dev_private;
2702 struct intel_crtc *intel_crtc =
2703 to_intel_crtc(encoder->base.crtc);
2704 enum dpio_channel port = vlv_dport_to_channel(dport);
2705 int pipe = intel_crtc->pipe;
2706
2707 intel_dp_prepare(encoder);
2708
2709 /* Program Tx lane resets to default */
2710 mutex_lock(&dev_priv->sb_lock);
2711 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2712 DPIO_PCS_TX_LANE2_RESET |
2713 DPIO_PCS_TX_LANE1_RESET);
2714 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2715 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2716 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2717 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2718 DPIO_PCS_CLK_SOFT_RESET);
2719
2720 /* Fix up inter-pair skew failure */
2721 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2722 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2723 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2724 mutex_unlock(&dev_priv->sb_lock);
2725 }
2726
2727 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2728 {
2729 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2730 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2731 struct drm_device *dev = encoder->base.dev;
2732 struct drm_i915_private *dev_priv = dev->dev_private;
2733 struct intel_crtc *intel_crtc =
2734 to_intel_crtc(encoder->base.crtc);
2735 enum dpio_channel ch = vlv_dport_to_channel(dport);
2736 int pipe = intel_crtc->pipe;
2737 int data, i, stagger;
2738 u32 val;
2739
2740 mutex_lock(&dev_priv->sb_lock);
2741
2742 /* allow hardware to manage TX FIFO reset source */
2743 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2744 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2745 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2746
2747 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2748 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2749 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2750
2751 /* Deassert soft data lane reset */
2752 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2753 val |= CHV_PCS_REQ_SOFTRESET_EN;
2754 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2755
2756 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2757 val |= CHV_PCS_REQ_SOFTRESET_EN;
2758 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2759
2760 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2761 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2762 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2763
2764 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2765 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2766 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2767
2768 /* Program Tx lane latency optimal setting */
2769 for (i = 0; i < 4; i++) {
2770 /* Set the upar bit */
2771 data = (i == 1) ? 0x0 : 0x1;
2772 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2773 data << DPIO_UPAR_SHIFT);
2774 }
2775
2776 /* Data lane stagger programming */
2777 if (intel_crtc->config->port_clock > 270000)
2778 stagger = 0x18;
2779 else if (intel_crtc->config->port_clock > 135000)
2780 stagger = 0xd;
2781 else if (intel_crtc->config->port_clock > 67500)
2782 stagger = 0x7;
2783 else if (intel_crtc->config->port_clock > 33750)
2784 stagger = 0x4;
2785 else
2786 stagger = 0x2;
2787
2788 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2789 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2790 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2791
2792 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2793 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2794 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2795
2796 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2797 DPIO_LANESTAGGER_STRAP(stagger) |
2798 DPIO_LANESTAGGER_STRAP_OVRD |
2799 DPIO_TX1_STAGGER_MASK(0x1f) |
2800 DPIO_TX1_STAGGER_MULT(6) |
2801 DPIO_TX2_STAGGER_MULT(0));
2802
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2804 DPIO_LANESTAGGER_STRAP(stagger) |
2805 DPIO_LANESTAGGER_STRAP_OVRD |
2806 DPIO_TX1_STAGGER_MASK(0x1f) |
2807 DPIO_TX1_STAGGER_MULT(7) |
2808 DPIO_TX2_STAGGER_MULT(5));
2809
2810 mutex_unlock(&dev_priv->sb_lock);
2811
2812 intel_enable_dp(encoder);
2813 }
2814
2815 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2816 {
2817 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2818 struct drm_device *dev = encoder->base.dev;
2819 struct drm_i915_private *dev_priv = dev->dev_private;
2820 struct intel_crtc *intel_crtc =
2821 to_intel_crtc(encoder->base.crtc);
2822 enum dpio_channel ch = vlv_dport_to_channel(dport);
2823 enum pipe pipe = intel_crtc->pipe;
2824 u32 val;
2825
2826 intel_dp_prepare(encoder);
2827
2828 mutex_lock(&dev_priv->sb_lock);
2829
2830 /* program left/right clock distribution */
2831 if (pipe != PIPE_B) {
2832 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2833 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2834 if (ch == DPIO_CH0)
2835 val |= CHV_BUFLEFTENA1_FORCE;
2836 if (ch == DPIO_CH1)
2837 val |= CHV_BUFRIGHTENA1_FORCE;
2838 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2839 } else {
2840 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2841 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2842 if (ch == DPIO_CH0)
2843 val |= CHV_BUFLEFTENA2_FORCE;
2844 if (ch == DPIO_CH1)
2845 val |= CHV_BUFRIGHTENA2_FORCE;
2846 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2847 }
2848
2849 /* program clock channel usage */
2850 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2851 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2852 if (pipe != PIPE_B)
2853 val &= ~CHV_PCS_USEDCLKCHANNEL;
2854 else
2855 val |= CHV_PCS_USEDCLKCHANNEL;
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2857
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2859 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2860 if (pipe != PIPE_B)
2861 val &= ~CHV_PCS_USEDCLKCHANNEL;
2862 else
2863 val |= CHV_PCS_USEDCLKCHANNEL;
2864 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2865
2866 /*
2867 * This is a bit weird since generally CL
2868 * matches the pipe, but here we need to
2869 * pick the CL based on the port.
2870 */
2871 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2872 if (pipe != PIPE_B)
2873 val &= ~CHV_CMN_USEDCLKCHANNEL;
2874 else
2875 val |= CHV_CMN_USEDCLKCHANNEL;
2876 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2877
2878 mutex_unlock(&dev_priv->sb_lock);
2879 }
2880
2881 /*
2882 * Native read with retry for link status and receiver capability reads for
2883 * cases where the sink may still be asleep.
2884 *
2885 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2886 * supposed to retry 3 times per the spec.
2887 */
2888 static ssize_t
2889 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2890 void *buffer, size_t size)
2891 {
2892 ssize_t ret;
2893 int i;
2894
2895 /*
2896 * Sometimes we just get the same incorrect byte repeated
2897 * over the entire buffer. Doing just one throwaway read
2898 * initially seems to "solve" it.
2899 */
2900 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2901
2902 for (i = 0; i < 3; i++) {
2903 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2904 if (ret == size)
2905 return ret;
2906 msleep(1);
2907 }
2908
2909 return ret;
2910 }
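/*
 * On success this returns exactly @size; after three failed attempts it
 * returns whatever the last drm_dp_dpcd_read() returned (a short count
 * or a negative error), so callers compare against the expected size,
 * as intel_dp_get_link_status() below does.
 */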
2911
2912 /*
2913 * Fetch AUX CH registers 0x202 - 0x207 which contain
2914 * link status information
2915 */
2916 static bool
2917 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2918 {
2919 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2920 DP_LANE0_1_STATUS,
2921 link_status,
2922 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2923 }
2924
2925 /* These are source-specific values. */
2926 static uint8_t
2927 intel_dp_voltage_max(struct intel_dp *intel_dp)
2928 {
2929 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2930 struct drm_i915_private *dev_priv = dev->dev_private;
2931 enum port port = dp_to_dig_port(intel_dp)->port;
2932
2933 if (IS_BROXTON(dev))
2934 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2935 else if (INTEL_INFO(dev)->gen >= 9) {
2936 if (dev_priv->edp_low_vswing && port == PORT_A)
2937 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2938 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2939 } else if (IS_VALLEYVIEW(dev))
2940 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2941 else if (IS_GEN7(dev) && port == PORT_A)
2942 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2943 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2944 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2945 else
2946 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2947 }
2948
2949 static uint8_t
2950 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2951 {
2952 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2953 enum port port = dp_to_dig_port(intel_dp)->port;
2954
2955 if (INTEL_INFO(dev)->gen >= 9) {
2956 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2958 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2959 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2960 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2961 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2962 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2964 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2965 default:
2966 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2967 }
2968 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2969 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2971 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2972 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2973 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2974 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2975 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2977 default:
2978 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2979 }
2980 } else if (IS_VALLEYVIEW(dev)) {
2981 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2982 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2983 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2985 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2986 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2987 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2989 default:
2990 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2991 }
2992 } else if (IS_GEN7(dev) && port == PORT_A) {
2993 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2994 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2995 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2996 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2997 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2998 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2999 default:
3000 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3001 }
3002 } else {
3003 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3005 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3007 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3009 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3011 default:
3012 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3013 }
3014 }
3015 }
3016
3017 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3018 {
3019 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3020 struct drm_i915_private *dev_priv = dev->dev_private;
3021 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3022 struct intel_crtc *intel_crtc =
3023 to_intel_crtc(dport->base.base.crtc);
3024 unsigned long demph_reg_value, preemph_reg_value,
3025 uniqtranscale_reg_value;
3026 uint8_t train_set = intel_dp->train_set[0];
3027 enum dpio_channel port = vlv_dport_to_channel(dport);
3028 int pipe = intel_crtc->pipe;
3029
3030 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3031 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3032 preemph_reg_value = 0x0004000;
3033 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3034 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3035 demph_reg_value = 0x2B405555;
3036 uniqtranscale_reg_value = 0x552AB83A;
3037 break;
3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3039 demph_reg_value = 0x2B404040;
3040 uniqtranscale_reg_value = 0x5548B83A;
3041 break;
3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3043 demph_reg_value = 0x2B245555;
3044 uniqtranscale_reg_value = 0x5560B83A;
3045 break;
3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3047 demph_reg_value = 0x2B405555;
3048 uniqtranscale_reg_value = 0x5598DA3A;
3049 break;
3050 default:
3051 return 0;
3052 }
3053 break;
3054 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3055 preemph_reg_value = 0x0002000;
3056 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3058 demph_reg_value = 0x2B404040;
3059 uniqtranscale_reg_value = 0x5552B83A;
3060 break;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3062 demph_reg_value = 0x2B404848;
3063 uniqtranscale_reg_value = 0x5580B83A;
3064 break;
3065 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3066 demph_reg_value = 0x2B404040;
3067 uniqtranscale_reg_value = 0x55ADDA3A;
3068 break;
3069 default:
3070 return 0;
3071 }
3072 break;
3073 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3074 preemph_reg_value = 0x0000000;
3075 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3077 demph_reg_value = 0x2B305555;
3078 uniqtranscale_reg_value = 0x5570B83A;
3079 break;
3080 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3081 demph_reg_value = 0x2B2B4040;
3082 uniqtranscale_reg_value = 0x55ADDA3A;
3083 break;
3084 default:
3085 return 0;
3086 }
3087 break;
3088 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3089 preemph_reg_value = 0x0006000;
3090 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3092 demph_reg_value = 0x1B405555;
3093 uniqtranscale_reg_value = 0x55ADDA3A;
3094 break;
3095 default:
3096 return 0;
3097 }
3098 break;
3099 default:
3100 return 0;
3101 }
3102
3103 mutex_lock(&dev_priv->sb_lock);
3104 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3105 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3106 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3107 uniqtranscale_reg_value);
3108 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3109 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3110 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3111 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3112 mutex_unlock(&dev_priv->sb_lock);
3113
3114 return 0;
3115 }
3116
3117 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3118 {
3119 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3120 struct drm_i915_private *dev_priv = dev->dev_private;
3121 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3122 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3123 u32 deemph_reg_value, margin_reg_value, val;
3124 uint8_t train_set = intel_dp->train_set[0];
3125 enum dpio_channel ch = vlv_dport_to_channel(dport);
3126 enum pipe pipe = intel_crtc->pipe;
3127 int i;
3128
3129 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3130 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3131 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3133 deemph_reg_value = 128;
3134 margin_reg_value = 52;
3135 break;
3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3137 deemph_reg_value = 128;
3138 margin_reg_value = 77;
3139 break;
3140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3141 deemph_reg_value = 128;
3142 margin_reg_value = 102;
3143 break;
3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3145 deemph_reg_value = 128;
3146 margin_reg_value = 154;
3147 /* FIXME extra to set for 1200 */
3148 break;
3149 default:
3150 return 0;
3151 }
3152 break;
3153 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3154 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3156 deemph_reg_value = 85;
3157 margin_reg_value = 78;
3158 break;
3159 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3160 deemph_reg_value = 85;
3161 margin_reg_value = 116;
3162 break;
3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3164 deemph_reg_value = 85;
3165 margin_reg_value = 154;
3166 break;
3167 default:
3168 return 0;
3169 }
3170 break;
3171 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3172 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3174 deemph_reg_value = 64;
3175 margin_reg_value = 104;
3176 break;
3177 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3178 deemph_reg_value = 64;
3179 margin_reg_value = 154;
3180 break;
3181 default:
3182 return 0;
3183 }
3184 break;
3185 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3186 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3187 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3188 deemph_reg_value = 43;
3189 margin_reg_value = 154;
3190 break;
3191 default:
3192 return 0;
3193 }
3194 break;
3195 default:
3196 return 0;
3197 }
3198
3199 mutex_lock(&dev_priv->sb_lock);
3200
3201 /* Clear calc init */
3202 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3203 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3204 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3205 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3206 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3207
3208 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3209 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3210 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3211 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3212 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3213
3214 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3215 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3216 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3217 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3218
3219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3220 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3221 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3222 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3223
3224 /* Program swing deemph */
3225 for (i = 0; i < 4; i++) {
3226 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3227 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3228 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3229 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3230 }
3231
3232 /* Program swing margin */
3233 for (i = 0; i < 4; i++) {
3234 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3235 val &= ~DPIO_SWING_MARGIN000_MASK;
3236 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3237 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3238 }
3239
3240 /* Disable unique transition scale */
3241 for (i = 0; i < 4; i++) {
3242 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3243 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3244 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3245 }
3246
3247 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3248 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3249 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3250 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3251
3252 /*
3253 * The document said it needs to set bit 27 for ch0 and bit 26
3254 * for ch1. Might be a typo in the doc.
3255 * For now, for this unique transition scale selection, set bit
3256 * 27 for ch0 and ch1.
3257 */
3258 for (i = 0; i < 4; i++) {
3259 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3260 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3261 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3262 }
3263
3264 for (i = 0; i < 4; i++) {
3265 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3266 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3267 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3268 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3269 }
3270 }
3271
3272 /* Start swing calculation */
3273 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3274 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3275 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3276
3277 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3278 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3279 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3280
3281 /* LRC Bypass */
3282 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3283 val |= DPIO_LRC_BYPASS;
3284 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3285
3286 mutex_unlock(&dev_priv->sb_lock);
3287
3288 return 0;
3289 }
3290
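/*
 * Collapse the sink's per-lane voltage swing / pre-emphasis adjust
 * requests into a single worst-case value, clamp it to what this
 * source can drive, and set the MAX_*_REACHED flags so the sink knows
 * not to ask for more.
 */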
3291 static void
3292 intel_get_adjust_train(struct intel_dp *intel_dp,
3293 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3294 {
3295 uint8_t v = 0;
3296 uint8_t p = 0;
3297 int lane;
3298 uint8_t voltage_max;
3299 uint8_t preemph_max;
3300
3301 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3302 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3303 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3304
3305 if (this_v > v)
3306 v = this_v;
3307 if (this_p > p)
3308 p = this_p;
3309 }
3310
3311 voltage_max = intel_dp_voltage_max(intel_dp);
3312 if (v >= voltage_max)
3313 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3314
3315 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3316 if (p >= preemph_max)
3317 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3318
3319 for (lane = 0; lane < 4; lane++)
3320 intel_dp->train_set[lane] = v | p;
3321 }
3322
3323 static uint32_t
3324 gen4_signal_levels(uint8_t train_set)
3325 {
3326 uint32_t signal_levels = 0;
3327
3328 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3330 default:
3331 signal_levels |= DP_VOLTAGE_0_4;
3332 break;
3333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3334 signal_levels |= DP_VOLTAGE_0_6;
3335 break;
3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3337 signal_levels |= DP_VOLTAGE_0_8;
3338 break;
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3340 signal_levels |= DP_VOLTAGE_1_2;
3341 break;
3342 }
3343 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3344 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3345 default:
3346 signal_levels |= DP_PRE_EMPHASIS_0;
3347 break;
3348 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3349 signal_levels |= DP_PRE_EMPHASIS_3_5;
3350 break;
3351 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3352 signal_levels |= DP_PRE_EMPHASIS_6;
3353 break;
3354 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3355 signal_levels |= DP_PRE_EMPHASIS_9_5;
3356 break;
3357 }
3358 return signal_levels;
3359 }
3360
3361 /* Gen6's DP voltage swing and pre-emphasis control */
3362 static uint32_t
3363 gen6_edp_signal_levels(uint8_t train_set)
3364 {
3365 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3366 DP_TRAIN_PRE_EMPHASIS_MASK);
3367 switch (signal_levels) {
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3370 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3372 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3375 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3378 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3381 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3382 default:
3383 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3384 "0x%x\n", signal_levels);
3385 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3386 }
3387 }
3388
3389 /* Gen7's DP voltage swing and pre-emphasis control */
3390 static uint32_t
3391 gen7_edp_signal_levels(uint8_t train_set)
3392 {
3393 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3394 DP_TRAIN_PRE_EMPHASIS_MASK);
3395 switch (signal_levels) {
3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3397 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3399 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3401 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3402
3403 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3404 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3406 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3407
3408 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3409 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3410 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3411 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3412
3413 default:
3414 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3415 "0x%x\n", signal_levels);
3416 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3417 }
3418 }
3419
3420 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3421 static uint32_t
3422 hsw_signal_levels(uint8_t train_set)
3423 {
3424 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3425 DP_TRAIN_PRE_EMPHASIS_MASK);
3426 switch (signal_levels) {
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3428 return DDI_BUF_TRANS_SELECT(0);
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3430 return DDI_BUF_TRANS_SELECT(1);
3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3432 return DDI_BUF_TRANS_SELECT(2);
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3434 return DDI_BUF_TRANS_SELECT(3);
3435
3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3437 return DDI_BUF_TRANS_SELECT(4);
3438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3439 return DDI_BUF_TRANS_SELECT(5);
3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3441 return DDI_BUF_TRANS_SELECT(6);
3442
3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3444 return DDI_BUF_TRANS_SELECT(7);
3445 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3446 return DDI_BUF_TRANS_SELECT(8);
3447
3448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3449 return DDI_BUF_TRANS_SELECT(9);
3450 default:
3451 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3452 "0x%x\n", signal_levels);
3453 return DDI_BUF_TRANS_SELECT(0);
3454 }
3455 }
3456
3457 static void bxt_signal_levels(struct intel_dp *intel_dp)
3458 {
3459 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3460 enum port port = dport->port;
3461 struct drm_device *dev = dport->base.base.dev;
3462 struct intel_encoder *encoder = &dport->base;
3463 uint8_t train_set = intel_dp->train_set[0];
3464 uint32_t level = 0;
3465
3466 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3467 DP_TRAIN_PRE_EMPHASIS_MASK);
3468 switch (signal_levels) {
3469 default:
3470 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3472 level = 0;
3473 break;
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3475 level = 1;
3476 break;
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3478 level = 2;
3479 break;
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3481 level = 3;
3482 break;
3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3484 level = 4;
3485 break;
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3487 level = 5;
3488 break;
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3490 level = 6;
3491 break;
3492 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3493 level = 7;
3494 break;
3495 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3496 level = 8;
3497 break;
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3499 level = 9;
3500 break;
3501 }
3502
3503 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3504 }
3505
3506 /* Properly updates "DP" with the correct signal levels. */
3507 static void
3508 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3509 {
3510 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3511 enum port port = intel_dig_port->port;
3512 struct drm_device *dev = intel_dig_port->base.base.dev;
3513 uint32_t signal_levels, mask;
3514 uint8_t train_set = intel_dp->train_set[0];
3515
3516 if (IS_BROXTON(dev)) {
3517 signal_levels = 0;
3518 bxt_signal_levels(intel_dp);
3519 mask = 0;
3520 } else if (HAS_DDI(dev)) {
3521 signal_levels = hsw_signal_levels(train_set);
3522 mask = DDI_BUF_EMP_MASK;
3523 } else if (IS_CHERRYVIEW(dev)) {
3524 signal_levels = chv_signal_levels(intel_dp);
3525 mask = 0;
3526 } else if (IS_VALLEYVIEW(dev)) {
3527 signal_levels = vlv_signal_levels(intel_dp);
3528 mask = 0;
3529 } else if (IS_GEN7(dev) && port == PORT_A) {
3530 signal_levels = gen7_edp_signal_levels(train_set);
3531 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3532 } else if (IS_GEN6(dev) && port == PORT_A) {
3533 signal_levels = gen6_edp_signal_levels(train_set);
3534 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3535 } else {
3536 signal_levels = gen4_signal_levels(train_set);
3537 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3538 }
3539
3540 if (mask)
3541 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3542
3543 DRM_DEBUG_KMS("Using vswing level %d\n",
3544 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3545 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3546 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3547 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3548
3549 *DP = (*DP & ~mask) | signal_levels;
3550 }
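/*
 * Note: for BXT, CHV and VLV the drive levels are programmed entirely
 * through the PHY sideband in the helpers above, so mask is 0 and the
 * DP port register itself carries no vswing/pre-emphasis bits.
 */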
3551
3552 static bool
3553 intel_dp_set_link_train(struct intel_dp *intel_dp,
3554 uint32_t *DP,
3555 uint8_t dp_train_pat)
3556 {
3557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3558 struct drm_device *dev = intel_dig_port->base.base.dev;
3559 struct drm_i915_private *dev_priv = dev->dev_private;
3560 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3561 int ret, len;
3562
3563 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3564
3565 I915_WRITE(intel_dp->output_reg, *DP);
3566 POSTING_READ(intel_dp->output_reg);
3567
3568 buf[0] = dp_train_pat;
3569 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3570 DP_TRAINING_PATTERN_DISABLE) {
3571 /* don't write DP_TRAINING_LANEx_SET on disable */
3572 len = 1;
3573 } else {
3574 		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3575 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3576 len = intel_dp->lane_count + 1;
3577 }
3578
3579 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3580 buf, len);
3581
3582 return ret == len;
3583 }
3584
3585 static bool
3586 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3587 uint8_t dp_train_pat)
3588 {
3589 if (!intel_dp->train_set_valid)
3590 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3591 intel_dp_set_signal_levels(intel_dp, DP);
3592 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3593 }
3594
3595 static bool
3596 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3597 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3598 {
3599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3600 struct drm_device *dev = intel_dig_port->base.base.dev;
3601 struct drm_i915_private *dev_priv = dev->dev_private;
3602 int ret;
3603
3604 intel_get_adjust_train(intel_dp, link_status);
3605 intel_dp_set_signal_levels(intel_dp, DP);
3606
3607 I915_WRITE(intel_dp->output_reg, *DP);
3608 POSTING_READ(intel_dp->output_reg);
3609
3610 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3611 intel_dp->train_set, intel_dp->lane_count);
3612
3613 return ret == intel_dp->lane_count;
3614 }
3615
3616 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3617 {
3618 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3619 struct drm_device *dev = intel_dig_port->base.base.dev;
3620 struct drm_i915_private *dev_priv = dev->dev_private;
3621 enum port port = intel_dig_port->port;
3622 uint32_t val;
3623
3624 if (!HAS_DDI(dev))
3625 return;
3626
3627 val = I915_READ(DP_TP_CTL(port));
3628 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3629 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3630 I915_WRITE(DP_TP_CTL(port), val);
3631
3632 /*
3633 * On PORT_A we can have only eDP in SST mode. There the only reason
3634 * we need to set idle transmission mode is to work around a HW issue
3635 * where we enable the pipe while not in idle link-training mode.
3636 	 * In this case there is a requirement to wait for a minimum number of
3637 * idle patterns to be sent.
3638 */
3639 if (port == PORT_A)
3640 return;
3641
3642 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3643 1))
3644 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3645 }
3646
3647 /* Enable corresponding port and start training pattern 1 */
3648 void
3649 intel_dp_start_link_train(struct intel_dp *intel_dp)
3650 {
3651 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3652 struct drm_device *dev = encoder->dev;
3653 int i;
3654 uint8_t voltage;
3655 int voltage_tries, loop_tries;
3656 uint32_t DP = intel_dp->DP;
3657 uint8_t link_config[2];
3658
3659 if (HAS_DDI(dev))
3660 intel_ddi_prepare_link_retrain(encoder);
3661
3662 /* Write the link configuration data */
3663 link_config[0] = intel_dp->link_bw;
3664 link_config[1] = intel_dp->lane_count;
3665 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3666 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3667 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3668 if (intel_dp->num_sink_rates)
3669 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3670 &intel_dp->rate_select, 1);
3671
3672 link_config[0] = 0;
3673 link_config[1] = DP_SET_ANSI_8B10B;
3674 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3675
3676 DP |= DP_PORT_EN;
3677
3678 /* clock recovery */
3679 if (!intel_dp_reset_link_train(intel_dp, &DP,
3680 DP_TRAINING_PATTERN_1 |
3681 DP_LINK_SCRAMBLING_DISABLE)) {
3682 DRM_ERROR("failed to enable link training\n");
3683 return;
3684 }
3685
3686 voltage = 0xff;
3687 voltage_tries = 0;
3688 loop_tries = 0;
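	/*
	 * Clock recovery loop: adjust drive settings as requested by the
	 * sink, giving up after 5 tries at the same voltage swing, or after
	 * 5 full restarts once every lane reports
	 * DP_TRAIN_MAX_SWING_REACHED.
	 */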
3689 for (;;) {
3690 uint8_t link_status[DP_LINK_STATUS_SIZE];
3691
3692 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3693 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3694 DRM_ERROR("failed to get link status\n");
3695 break;
3696 }
3697
3698 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3699 DRM_DEBUG_KMS("clock recovery OK\n");
3700 break;
3701 }
3702
3703 /*
3704 * if we used previously trained voltage and pre-emphasis values
3705 * and we don't get clock recovery, reset link training values
3706 */
3707 if (intel_dp->train_set_valid) {
3708 DRM_DEBUG_KMS("clock recovery not ok, reset");
3709 /* clear the flag as we are not reusing train set */
3710 intel_dp->train_set_valid = false;
3711 if (!intel_dp_reset_link_train(intel_dp, &DP,
3712 DP_TRAINING_PATTERN_1 |
3713 DP_LINK_SCRAMBLING_DISABLE)) {
3714 DRM_ERROR("failed to enable link training\n");
3715 return;
3716 }
3717 continue;
3718 }
3719
3720 /* Check to see if we've tried the max voltage */
3721 for (i = 0; i < intel_dp->lane_count; i++)
3722 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3723 break;
3724 if (i == intel_dp->lane_count) {
3725 ++loop_tries;
3726 if (loop_tries == 5) {
3727 DRM_ERROR("too many full retries, give up\n");
3728 break;
3729 }
3730 intel_dp_reset_link_train(intel_dp, &DP,
3731 DP_TRAINING_PATTERN_1 |
3732 DP_LINK_SCRAMBLING_DISABLE);
3733 voltage_tries = 0;
3734 continue;
3735 }
3736
3737 /* Check to see if we've tried the same voltage 5 times */
3738 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3739 ++voltage_tries;
3740 if (voltage_tries == 5) {
3741 DRM_ERROR("too many voltage retries, give up\n");
3742 break;
3743 }
3744 } else
3745 voltage_tries = 0;
3746 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3747
3748 /* Update training set as requested by target */
3749 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3750 DRM_ERROR("failed to update link training\n");
3751 break;
3752 }
3753 }
3754
3755 intel_dp->DP = DP;
3756 }
3757
3758 void
3759 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3760 {
3761 bool channel_eq = false;
3762 int tries, cr_tries;
3763 uint32_t DP = intel_dp->DP;
3764 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3765
3766 	/* Training Pattern 3 for HBR2 or DP 1.2 sinks that support it */
3767 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3768 training_pattern = DP_TRAINING_PATTERN_3;
3769
3770 /* channel equalization */
3771 if (!intel_dp_set_link_train(intel_dp, &DP,
3772 training_pattern |
3773 DP_LINK_SCRAMBLING_DISABLE)) {
3774 DRM_ERROR("failed to start channel equalization\n");
3775 return;
3776 }
3777
3778 tries = 0;
3779 cr_tries = 0;
3780 channel_eq = false;
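	/*
	 * Channel equalization loop: keep updating drive settings as
	 * requested by the sink. If clock recovery is lost, or 5 updates
	 * pass without EQ success, restart training from scratch; abort
	 * once cr_tries exceeds 5.
	 */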
3781 for (;;) {
3782 uint8_t link_status[DP_LINK_STATUS_SIZE];
3783
3784 if (cr_tries > 5) {
3785 DRM_ERROR("failed to train DP, aborting\n");
3786 break;
3787 }
3788
3789 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3790 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3791 DRM_ERROR("failed to get link status\n");
3792 break;
3793 }
3794
3795 /* Make sure clock is still ok */
3796 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3797 intel_dp->train_set_valid = false;
3798 intel_dp_start_link_train(intel_dp);
3799 intel_dp_set_link_train(intel_dp, &DP,
3800 training_pattern |
3801 DP_LINK_SCRAMBLING_DISABLE);
3802 cr_tries++;
3803 continue;
3804 }
3805
3806 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3807 channel_eq = true;
3808 break;
3809 }
3810
3811 /* Try 5 times, then try clock recovery if that fails */
3812 if (tries > 5) {
3813 intel_dp->train_set_valid = false;
3814 intel_dp_start_link_train(intel_dp);
3815 intel_dp_set_link_train(intel_dp, &DP,
3816 training_pattern |
3817 DP_LINK_SCRAMBLING_DISABLE);
3818 tries = 0;
3819 cr_tries++;
3820 continue;
3821 }
3822
3823 /* Update training set as requested by target */
3824 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3825 DRM_ERROR("failed to update link training\n");
3826 break;
3827 }
3828 ++tries;
3829 }
3830
3831 intel_dp_set_idle_link_train(intel_dp);
3832
3833 intel_dp->DP = DP;
3834
3835 if (channel_eq) {
3836 intel_dp->train_set_valid = true;
3837 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3838 }
3839 }
3840
3841 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3842 {
3843 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3844 DP_TRAINING_PATTERN_DISABLE);
3845 }
3846
3847 static void
3848 intel_dp_link_down(struct intel_dp *intel_dp)
3849 {
3850 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3851 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3852 enum port port = intel_dig_port->port;
3853 struct drm_device *dev = intel_dig_port->base.base.dev;
3854 struct drm_i915_private *dev_priv = dev->dev_private;
3855 uint32_t DP = intel_dp->DP;
3856
3857 if (WARN_ON(HAS_DDI(dev)))
3858 return;
3859
3860 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3861 return;
3862
3863 DRM_DEBUG_KMS("\n");
3864
3865 if ((IS_GEN7(dev) && port == PORT_A) ||
3866 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3867 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3868 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3869 } else {
3870 if (IS_CHERRYVIEW(dev))
3871 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3872 else
3873 DP &= ~DP_LINK_TRAIN_MASK;
3874 DP |= DP_LINK_TRAIN_PAT_IDLE;
3875 }
3876 I915_WRITE(intel_dp->output_reg, DP);
3877 POSTING_READ(intel_dp->output_reg);
3878
3879 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3880 I915_WRITE(intel_dp->output_reg, DP);
3881 POSTING_READ(intel_dp->output_reg);
3882
3883 /*
3884 * HW workaround for IBX, we need to move the port
3885 * to transcoder A after disabling it to allow the
3886 * matching HDMI port to be enabled on transcoder A.
3887 */
3888 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3889 /* always enable with pattern 1 (as per spec) */
3890 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3891 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3892 I915_WRITE(intel_dp->output_reg, DP);
3893 POSTING_READ(intel_dp->output_reg);
3894
3895 DP &= ~DP_PORT_EN;
3896 I915_WRITE(intel_dp->output_reg, DP);
3897 POSTING_READ(intel_dp->output_reg);
3898 }
3899
3900 msleep(intel_dp->panel_power_down_delay);
3901 }
3902
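/*
 * Read and cache the sink's DPCD receiver capabilities, then probe the
 * optional blocks we care about: PSR/PSR2, TPS3 support, the eDP 1.4
 * intermediate link rate table and, for branch devices, the downstream
 * port info. Returns false if the sink looks absent or broken.
 */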
3903 static bool
3904 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3905 {
3906 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3907 struct drm_device *dev = dig_port->base.base.dev;
3908 struct drm_i915_private *dev_priv = dev->dev_private;
3909 uint8_t rev;
3910
3911 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3912 sizeof(intel_dp->dpcd)) < 0)
3913 return false; /* aux transfer failed */
3914
3915 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3916
3917 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3918 return false; /* DPCD not present */
3919
3920 /* Check if the panel supports PSR */
3921 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3922 if (is_edp(intel_dp)) {
3923 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3924 intel_dp->psr_dpcd,
3925 sizeof(intel_dp->psr_dpcd));
3926 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3927 dev_priv->psr.sink_support = true;
3928 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3929 }
3930
3931 if (INTEL_INFO(dev)->gen >= 9 &&
3932 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3933 uint8_t frame_sync_cap;
3934
3935 dev_priv->psr.sink_support = true;
3936 intel_dp_dpcd_read_wake(&intel_dp->aux,
3937 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3938 &frame_sync_cap, 1);
3939 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3940 /* PSR2 needs frame sync as well */
3941 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3942 DRM_DEBUG_KMS("PSR2 %s on sink",
3943 dev_priv->psr.psr2_support ? "supported" : "not supported");
3944 }
3945 }
3946
3947 /* Training Pattern 3 support, both source and sink */
3948 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3949 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3950 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3951 intel_dp->use_tps3 = true;
3952 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3953 } else
3954 intel_dp->use_tps3 = false;
3955
3956 /* Intermediate frequency support */
3957 if (is_edp(intel_dp) &&
3958 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3959 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3960 	    (rev >= 0x03)) { /* eDP 1.4 or higher */
3961 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3962 int i;
3963
3964 intel_dp_dpcd_read_wake(&intel_dp->aux,
3965 DP_SUPPORTED_LINK_RATES,
3966 sink_rates,
3967 sizeof(sink_rates));
3968
3969 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3970 int val = le16_to_cpu(sink_rates[i]);
3971
3972 if (val == 0)
3973 break;
3974
3975 			/* Value read is in 200 kHz units; drm clocks are stored in deca-kHz */
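			/* e.g. HBR2: val = 27000 -> (27000 * 200) / 10 = 540000 */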
3976 intel_dp->sink_rates[i] = (val * 200) / 10;
3977 }
3978 intel_dp->num_sink_rates = i;
3979 }
3980
3981 intel_dp_print_rates(intel_dp);
3982
3983 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3984 DP_DWN_STRM_PORT_PRESENT))
3985 return true; /* native DP sink */
3986
3987 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3988 return true; /* no per-port downstream info */
3989
3990 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3991 intel_dp->downstream_ports,
3992 DP_MAX_DOWNSTREAM_PORTS) < 0)
3993 return false; /* downstream port status fetch failed */
3994
3995 return true;
3996 }
3997
3998 static void
3999 intel_dp_probe_oui(struct intel_dp *intel_dp)
4000 {
4001 u8 buf[3];
4002
4003 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4004 return;
4005
4006 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4007 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4008 buf[0], buf[1], buf[2]);
4009
4010 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4011 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4012 buf[0], buf[1], buf[2]);
4013 }
4014
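/*
 * MST probe: a sink advertising DPCD 1.2+ with DP_MST_CAP set is moved
 * into MST mode via the topology manager; returns the resulting state.
 */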
4015 static bool
4016 intel_dp_probe_mst(struct intel_dp *intel_dp)
4017 {
4018 u8 buf[1];
4019
4020 if (!intel_dp->can_mst)
4021 return false;
4022
4023 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4024 return false;
4025
4026 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
4027 if (buf[0] & DP_MST_CAP) {
4028 DRM_DEBUG_KMS("Sink is MST capable\n");
4029 intel_dp->is_mst = true;
4030 } else {
4031 DRM_DEBUG_KMS("Sink is not MST capable\n");
4032 intel_dp->is_mst = false;
4033 }
4034 }
4035
4036 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4037 return intel_dp->is_mst;
4038 }
4039
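/*
 * Sink CRC helper for testing: start CRC calculation via DP_TEST_SINK,
 * wait up to 6 vblanks for DP_TEST_COUNT to advance, read the six CRC
 * bytes starting at DP_TEST_CRC_R_CR, then stop the test. IPS is
 * disabled for the duration of the measurement.
 */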
4040 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4041 {
4042 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4043 struct drm_device *dev = intel_dig_port->base.base.dev;
4044 struct intel_crtc *intel_crtc =
4045 to_intel_crtc(intel_dig_port->base.base.crtc);
4046 u8 buf;
4047 int test_crc_count;
4048 int attempts = 6;
4049 int ret = 0;
4050
4051 hsw_disable_ips(intel_crtc);
4052
4053 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4054 ret = -EIO;
4055 goto out;
4056 }
4057
4058 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4059 ret = -ENOTTY;
4060 goto out;
4061 }
4062
4063 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4064 ret = -EIO;
4065 goto out;
4066 }
4067
4068 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4069 buf | DP_TEST_SINK_START) < 0) {
4070 ret = -EIO;
4071 goto out;
4072 }
4073
4074 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4075 ret = -EIO;
4076 goto out;
4077 }
4078
4079 test_crc_count = buf & DP_TEST_COUNT_MASK;
4080
4081 do {
4082 if (drm_dp_dpcd_readb(&intel_dp->aux,
4083 DP_TEST_SINK_MISC, &buf) < 0) {
4084 ret = -EIO;
4085 goto out;
4086 }
4087 intel_wait_for_vblank(dev, intel_crtc->pipe);
4088 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4089
4090 if (attempts == 0) {
4091 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4092 ret = -ETIMEDOUT;
4093 goto out;
4094 }
4095
4096 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4097 ret = -EIO;
4098 goto out;
4099 }
4100
4101 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4102 ret = -EIO;
4103 goto out;
4104 }
4105 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4106 buf & ~DP_TEST_SINK_START) < 0) {
4107 ret = -EIO;
4108 goto out;
4109 }
4110 out:
4111 hsw_enable_ips(intel_crtc);
4112 return ret;
4113 }
4114
4115 static bool
4116 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4117 {
4118 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4119 DP_DEVICE_SERVICE_IRQ_VECTOR,
4120 sink_irq_vector, 1) == 1;
4121 }
4122
4123 static bool
4124 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4125 {
4126 int ret;
4127
4128 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4129 DP_SINK_COUNT_ESI,
4130 sink_irq_vector, 14);
4131 if (ret != 14)
4132 return false;
4133
4134 return true;
4135 }
4136
4137 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4138 {
4139 uint8_t test_result = DP_TEST_ACK;
4140 return test_result;
4141 }
4142
4143 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4144 {
4145 uint8_t test_result = DP_TEST_NAK;
4146 return test_result;
4147 }
4148
4149 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4150 {
4151 uint8_t test_result = DP_TEST_NAK;
4152 struct intel_connector *intel_connector = intel_dp->attached_connector;
4153 struct drm_connector *connector = &intel_connector->base;
4154
4155 if (intel_connector->detect_edid == NULL ||
4156 connector->edid_corrupt ||
4157 intel_dp->aux.i2c_defer_count > 6) {
4158 /* Check EDID read for NACKs, DEFERs and corruption
4159 * (DP CTS 1.2 Core r1.1)
4160 * 4.2.2.4 : Failed EDID read, I2C_NAK
4161 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4162 * 4.2.2.6 : EDID corruption detected
4163 * Use failsafe mode for all cases
4164 */
4165 if (intel_dp->aux.i2c_nack_count > 0 ||
4166 intel_dp->aux.i2c_defer_count > 0)
4167 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4168 intel_dp->aux.i2c_nack_count,
4169 intel_dp->aux.i2c_defer_count);
4170 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4171 } else {
4172 if (!drm_dp_dpcd_write(&intel_dp->aux,
4173 DP_TEST_EDID_CHECKSUM,
4174 &intel_connector->detect_edid->checksum,
4175 1))
4176 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4177
4178 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4179 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4180 }
4181
4182 /* Set test active flag here so userspace doesn't interrupt things */
4183 intel_dp->compliance_test_active = 1;
4184
4185 return test_result;
4186 }
4187
4188 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4189 {
4190 uint8_t test_result = DP_TEST_NAK;
4191 return test_result;
4192 }
4193
4194 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4195 {
4196 uint8_t response = DP_TEST_NAK;
4197 uint8_t rxdata = 0;
4198 int status = 0;
4199
4200 intel_dp->compliance_test_active = 0;
4201 intel_dp->compliance_test_type = 0;
4202 intel_dp->compliance_test_data = 0;
4203
4204 intel_dp->aux.i2c_nack_count = 0;
4205 intel_dp->aux.i2c_defer_count = 0;
4206
4207 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4208 if (status <= 0) {
4209 DRM_DEBUG_KMS("Could not read test request from sink\n");
4210 goto update_status;
4211 }
4212
4213 switch (rxdata) {
4214 case DP_TEST_LINK_TRAINING:
4215 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4216 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4217 response = intel_dp_autotest_link_training(intel_dp);
4218 break;
4219 case DP_TEST_LINK_VIDEO_PATTERN:
4220 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4221 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4222 response = intel_dp_autotest_video_pattern(intel_dp);
4223 break;
4224 case DP_TEST_LINK_EDID_READ:
4225 DRM_DEBUG_KMS("EDID test requested\n");
4226 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4227 response = intel_dp_autotest_edid(intel_dp);
4228 break;
4229 case DP_TEST_LINK_PHY_TEST_PATTERN:
4230 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4231 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4232 response = intel_dp_autotest_phy_pattern(intel_dp);
4233 break;
4234 default:
4235 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4236 break;
4237 }
4238
4239 update_status:
4240 status = drm_dp_dpcd_write(&intel_dp->aux,
4241 DP_TEST_RESPONSE,
4242 &response, 1);
4243 if (status <= 0)
4244 DRM_DEBUG_KMS("Could not write test response to sink\n");
4245 }
4246
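/*
 * Service an MST short pulse: read the ESI (event status indicator)
 * block, retrain if channel EQ has dropped, let the topology manager
 * handle the IRQ, ack the handled event bits and loop while new events
 * keep arriving. A failed ESI read is treated as the device having
 * vanished, in which case MST mode is torn down.
 */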
4247 static int
4248 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4249 {
4250 bool bret;
4251
4252 if (intel_dp->is_mst) {
4253 u8 esi[16] = { 0 };
4254 int ret = 0;
4255 int retry;
4256 bool handled;
4257 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4258 go_again:
4259 		if (bret) {
4260
4261 /* check link status - esi[10] = 0x200c */
4262 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4263 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4264 intel_dp_start_link_train(intel_dp);
4265 intel_dp_complete_link_train(intel_dp);
4266 intel_dp_stop_link_train(intel_dp);
4267 }
4268
4269 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4270 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4271
4272 if (handled) {
4273 for (retry = 0; retry < 3; retry++) {
4274 int wret;
4275 wret = drm_dp_dpcd_write(&intel_dp->aux,
4276 DP_SINK_COUNT_ESI+1,
4277 &esi[1], 3);
4278 if (wret == 3) {
4279 break;
4280 }
4281 }
4282
4283 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4284 				if (bret) {
4285 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4286 goto go_again;
4287 }
4288 } else
4289 ret = 0;
4290
4291 return ret;
4292 } else {
4293 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4294 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4295 intel_dp->is_mst = false;
4296 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4297 /* send a hotplug event */
4298 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4299 }
4300 }
4301 return -EINVAL;
4302 }
4303
4304 /*
4305 * According to DP spec
4306 * 5.1.2:
4307 * 1. Read DPCD
4308 * 2. Configure link according to Receiver Capabilities
4309 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4310 * 4. Check link status on receipt of hot-plug interrupt
4311 */
4312 static void
4313 intel_dp_check_link_status(struct intel_dp *intel_dp)
4314 {
4315 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4316 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4317 u8 sink_irq_vector;
4318 u8 link_status[DP_LINK_STATUS_SIZE];
4319
4320 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4321
4322 if (!intel_encoder->connectors_active)
4323 return;
4324
4325 if (WARN_ON(!intel_encoder->base.crtc))
4326 return;
4327
4328 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4329 return;
4330
4331 /* Try to read receiver status if the link appears to be up */
4332 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4333 return;
4334 }
4335
4336 /* Now read the DPCD to see if it's actually running */
4337 if (!intel_dp_get_dpcd(intel_dp)) {
4338 return;
4339 }
4340
4341 /* Try to read the source of the interrupt */
4342 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4343 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4344 /* Clear interrupt source */
4345 drm_dp_dpcd_writeb(&intel_dp->aux,
4346 DP_DEVICE_SERVICE_IRQ_VECTOR,
4347 sink_irq_vector);
4348
4349 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4350 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4351 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4352 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4353 }
4354
4355 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4356 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4357 intel_encoder->base.name);
4358 intel_dp_start_link_train(intel_dp);
4359 intel_dp_complete_link_train(intel_dp);
4360 intel_dp_stop_link_train(intel_dp);
4361 }
4362 }
4363
4364 /* XXX this is probably wrong for multiple downstream ports */
4365 static enum drm_connector_status
4366 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4367 {
4368 uint8_t *dpcd = intel_dp->dpcd;
4369 uint8_t type;
4370
4371 if (!intel_dp_get_dpcd(intel_dp))
4372 return connector_status_disconnected;
4373
4374 /* if there's no downstream port, we're done */
4375 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4376 return connector_status_connected;
4377
4378 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4379 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4380 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4381 uint8_t reg;
4382
4383 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4384 &reg, 1) < 0)
4385 return connector_status_unknown;
4386
4387 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4388 : connector_status_disconnected;
4389 }
4390
4391 /* If no HPD, poke DDC gently */
4392 if (drm_probe_ddc(&intel_dp->aux.ddc))
4393 return connector_status_connected;
4394
4395 /* Well we tried, say unknown for unreliable port types */
4396 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4397 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4398 if (type == DP_DS_PORT_TYPE_VGA ||
4399 type == DP_DS_PORT_TYPE_NON_EDID)
4400 return connector_status_unknown;
4401 } else {
4402 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4403 DP_DWN_STRM_PORT_TYPE_MASK;
4404 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4405 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4406 return connector_status_unknown;
4407 }
4408
4409 /* Anything else is out of spec, warn and ignore */
4410 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4411 return connector_status_disconnected;
4412 }
4413
4414 static enum drm_connector_status
4415 edp_detect(struct intel_dp *intel_dp)
4416 {
4417 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4418 enum drm_connector_status status;
4419
4420 status = intel_panel_detect(dev);
4421 if (status == connector_status_unknown)
4422 status = connector_status_connected;
4423
4424 return status;
4425 }
4426
4427 static enum drm_connector_status
4428 ironlake_dp_detect(struct intel_dp *intel_dp)
4429 {
4430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4431 struct drm_i915_private *dev_priv = dev->dev_private;
4432 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4433
4434 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4435 return connector_status_disconnected;
4436
4437 return intel_dp_detect_dpcd(intel_dp);
4438 }
4439
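/*
 * Sample the hotplug live status bit for the port: returns 1 when the
 * bit reports a connected sink, 0 when it does not, and -EINVAL for
 * ports without a live status bit.
 */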
4440 static int g4x_digital_port_connected(struct drm_device *dev,
4441 struct intel_digital_port *intel_dig_port)
4442 {
4443 struct drm_i915_private *dev_priv = dev->dev_private;
4444 uint32_t bit;
4445
4446 if (IS_VALLEYVIEW(dev)) {
4447 switch (intel_dig_port->port) {
4448 case PORT_B:
4449 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4450 break;
4451 case PORT_C:
4452 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4453 break;
4454 case PORT_D:
4455 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4456 break;
4457 default:
4458 return -EINVAL;
4459 }
4460 } else {
4461 switch (intel_dig_port->port) {
4462 case PORT_B:
4463 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4464 break;
4465 case PORT_C:
4466 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4467 break;
4468 case PORT_D:
4469 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4470 break;
4471 default:
4472 return -EINVAL;
4473 }
4474 }
4475
4476 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4477 return 0;
4478 return 1;
4479 }
4480
4481 static enum drm_connector_status
4482 g4x_dp_detect(struct intel_dp *intel_dp)
4483 {
4484 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4485 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4486 int ret;
4487
4488 /* Can't disconnect eDP, but you can close the lid... */
4489 if (is_edp(intel_dp)) {
4490 enum drm_connector_status status;
4491
4492 status = intel_panel_detect(dev);
4493 if (status == connector_status_unknown)
4494 status = connector_status_connected;
4495 return status;
4496 }
4497
4498 ret = g4x_digital_port_connected(dev, intel_dig_port);
4499 if (ret == -EINVAL)
4500 return connector_status_unknown;
4501 else if (ret == 0)
4502 return connector_status_disconnected;
4503
4504 return intel_dp_detect_dpcd(intel_dp);
4505 }
4506
4507 static struct edid *
4508 intel_dp_get_edid(struct intel_dp *intel_dp)
4509 {
4510 struct intel_connector *intel_connector = intel_dp->attached_connector;
4511
4512 /* use cached edid if we have one */
4513 if (intel_connector->edid) {
4514 /* invalid edid */
4515 if (IS_ERR(intel_connector->edid))
4516 return NULL;
4517
4518 return drm_edid_duplicate(intel_connector->edid);
4519 } else
4520 return drm_get_edid(&intel_connector->base,
4521 &intel_dp->aux.ddc);
4522 }
4523
4524 static void
4525 intel_dp_set_edid(struct intel_dp *intel_dp)
4526 {
4527 struct intel_connector *intel_connector = intel_dp->attached_connector;
4528 struct edid *edid;
4529
4530 edid = intel_dp_get_edid(intel_dp);
4531 intel_connector->detect_edid = edid;
4532
4533 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4534 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4535 else
4536 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4537 }
4538
4539 static void
4540 intel_dp_unset_edid(struct intel_dp *intel_dp)
4541 {
4542 struct intel_connector *intel_connector = intel_dp->attached_connector;
4543
4544 kfree(intel_connector->detect_edid);
4545 intel_connector->detect_edid = NULL;
4546
4547 intel_dp->has_audio = false;
4548 }
4549
4550 static enum intel_display_power_domain
4551 intel_dp_power_get(struct intel_dp *dp)
4552 {
4553 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4554 enum intel_display_power_domain power_domain;
4555
4556 power_domain = intel_display_port_power_domain(encoder);
4557 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4558
4559 return power_domain;
4560 }
4561
4562 static void
4563 intel_dp_power_put(struct intel_dp *dp,
4564 enum intel_display_power_domain power_domain)
4565 {
4566 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4567 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4568 }
4569
4570 static enum drm_connector_status
4571 intel_dp_detect(struct drm_connector *connector, bool force)
4572 {
4573 struct intel_dp *intel_dp = intel_attached_dp(connector);
4574 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4575 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4576 struct drm_device *dev = connector->dev;
4577 enum drm_connector_status status;
4578 enum intel_display_power_domain power_domain;
4579 bool ret;
4580 u8 sink_irq_vector;
4581
4582 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4583 connector->base.id, connector->name);
4584 intel_dp_unset_edid(intel_dp);
4585
4586 if (intel_dp->is_mst) {
4587 /* MST devices are disconnected from a monitor POV */
4588 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4589 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4590 return connector_status_disconnected;
4591 }
4592
4593 power_domain = intel_dp_power_get(intel_dp);
4594
4595 /* Can't disconnect eDP, but you can close the lid... */
4596 if (is_edp(intel_dp))
4597 status = edp_detect(intel_dp);
4598 else if (HAS_PCH_SPLIT(dev))
4599 status = ironlake_dp_detect(intel_dp);
4600 else
4601 status = g4x_dp_detect(intel_dp);
4602 if (status != connector_status_connected)
4603 goto out;
4604
4605 intel_dp_probe_oui(intel_dp);
4606
4607 ret = intel_dp_probe_mst(intel_dp);
4608 if (ret) {
4609 		/* if we are in MST mode then this connector
4610 		 * won't appear connected or have anything with EDID on it */
4611 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4612 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4613 status = connector_status_disconnected;
4614 goto out;
4615 }
4616
4617 intel_dp_set_edid(intel_dp);
4618
4619 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4620 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4621 status = connector_status_connected;
4622
4623 /* Try to read the source of the interrupt */
4624 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4625 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4626 /* Clear interrupt source */
4627 drm_dp_dpcd_writeb(&intel_dp->aux,
4628 DP_DEVICE_SERVICE_IRQ_VECTOR,
4629 sink_irq_vector);
4630
4631 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4632 intel_dp_handle_test_request(intel_dp);
4633 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4634 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4635 }
4636
4637 out:
4638 intel_dp_power_put(intel_dp, power_domain);
4639 return status;
4640 }
4641
4642 static void
4643 intel_dp_force(struct drm_connector *connector)
4644 {
4645 struct intel_dp *intel_dp = intel_attached_dp(connector);
4646 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4647 enum intel_display_power_domain power_domain;
4648
4649 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4650 connector->base.id, connector->name);
4651 intel_dp_unset_edid(intel_dp);
4652
4653 if (connector->status != connector_status_connected)
4654 return;
4655
4656 power_domain = intel_dp_power_get(intel_dp);
4657
4658 intel_dp_set_edid(intel_dp);
4659
4660 intel_dp_power_put(intel_dp, power_domain);
4661
4662 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4663 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4664 }
4665
4666 static int intel_dp_get_modes(struct drm_connector *connector)
4667 {
4668 struct intel_connector *intel_connector = to_intel_connector(connector);
4669 struct edid *edid;
4670
4671 edid = intel_connector->detect_edid;
4672 if (edid) {
4673 int ret = intel_connector_update_modes(connector, edid);
4674 if (ret)
4675 return ret;
4676 }
4677
4678 /* if eDP has no EDID, fall back to fixed mode */
4679 if (is_edp(intel_attached_dp(connector)) &&
4680 intel_connector->panel.fixed_mode) {
4681 struct drm_display_mode *mode;
4682
4683 mode = drm_mode_duplicate(connector->dev,
4684 intel_connector->panel.fixed_mode);
4685 if (mode) {
4686 drm_mode_probed_add(connector, mode);
4687 return 1;
4688 }
4689 }
4690
4691 return 0;
4692 }
4693
4694 static bool
4695 intel_dp_detect_audio(struct drm_connector *connector)
4696 {
4697 bool has_audio = false;
4698 struct edid *edid;
4699
4700 edid = to_intel_connector(connector)->detect_edid;
4701 if (edid)
4702 has_audio = drm_detect_monitor_audio(edid);
4703
4704 return has_audio;
4705 }
4706
4707 static int
4708 intel_dp_set_property(struct drm_connector *connector,
4709 struct drm_property *property,
4710 uint64_t val)
4711 {
4712 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4713 struct intel_connector *intel_connector = to_intel_connector(connector);
4714 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4715 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4716 int ret;
4717
4718 ret = drm_object_property_set_value(&connector->base, property, val);
4719 if (ret)
4720 return ret;
4721
4722 if (property == dev_priv->force_audio_property) {
4723 int i = val;
4724 bool has_audio;
4725
4726 if (i == intel_dp->force_audio)
4727 return 0;
4728
4729 intel_dp->force_audio = i;
4730
4731 if (i == HDMI_AUDIO_AUTO)
4732 has_audio = intel_dp_detect_audio(connector);
4733 else
4734 has_audio = (i == HDMI_AUDIO_ON);
4735
4736 if (has_audio == intel_dp->has_audio)
4737 return 0;
4738
4739 intel_dp->has_audio = has_audio;
4740 goto done;
4741 }
4742
4743 if (property == dev_priv->broadcast_rgb_property) {
4744 bool old_auto = intel_dp->color_range_auto;
4745 uint32_t old_range = intel_dp->color_range;
4746
4747 switch (val) {
4748 case INTEL_BROADCAST_RGB_AUTO:
4749 intel_dp->color_range_auto = true;
4750 break;
4751 case INTEL_BROADCAST_RGB_FULL:
4752 intel_dp->color_range_auto = false;
4753 intel_dp->color_range = 0;
4754 break;
4755 case INTEL_BROADCAST_RGB_LIMITED:
4756 intel_dp->color_range_auto = false;
4757 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4758 break;
4759 default:
4760 return -EINVAL;
4761 }
4762
4763 if (old_auto == intel_dp->color_range_auto &&
4764 old_range == intel_dp->color_range)
4765 return 0;
4766
4767 goto done;
4768 }
4769
4770 if (is_edp(intel_dp) &&
4771 property == connector->dev->mode_config.scaling_mode_property) {
4772 if (val == DRM_MODE_SCALE_NONE) {
4773 DRM_DEBUG_KMS("no scaling not supported\n");
4774 return -EINVAL;
4775 }
4776
4777 if (intel_connector->panel.fitting_mode == val) {
4778 /* the eDP scaling property is not changed */
4779 return 0;
4780 }
4781 intel_connector->panel.fitting_mode = val;
4782
4783 goto done;
4784 }
4785
4786 return -EINVAL;
4787
4788 done:
4789 if (intel_encoder->base.crtc)
4790 intel_crtc_restore_mode(intel_encoder->base.crtc);
4791
4792 return 0;
4793 }
4794
4795 static void
4796 intel_dp_connector_destroy(struct drm_connector *connector)
4797 {
4798 struct intel_connector *intel_connector = to_intel_connector(connector);
4799
4800 kfree(intel_connector->detect_edid);
4801
4802 if (!IS_ERR_OR_NULL(intel_connector->edid))
4803 kfree(intel_connector->edid);
4804
4805 /* Can't call is_edp() since the encoder may have been destroyed
4806 * already. */
4807 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4808 intel_panel_fini(&intel_connector->panel);
4809
4810 drm_connector_cleanup(connector);
4811 kfree(connector);
4812 }
4813
4814 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4815 {
4816 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4817 struct intel_dp *intel_dp = &intel_dig_port->dp;
4818
4819 drm_dp_aux_unregister(&intel_dp->aux);
4820 intel_dp_mst_encoder_cleanup(intel_dig_port);
4821 if (is_edp(intel_dp)) {
4822 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4823 /*
4824 		 * vdd might still be enabled due to the delayed vdd off.
4825 * Make sure vdd is actually turned off here.
4826 */
4827 pps_lock(intel_dp);
4828 edp_panel_vdd_off_sync(intel_dp);
4829 pps_unlock(intel_dp);
4830
4831 if (intel_dp->edp_notifier.notifier_call) {
4832 unregister_reboot_notifier(&intel_dp->edp_notifier);
4833 intel_dp->edp_notifier.notifier_call = NULL;
4834 }
4835 }
4836 drm_encoder_cleanup(encoder);
4837 kfree(intel_dig_port);
4838 }
4839
4840 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4841 {
4842 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4843
4844 if (!is_edp(intel_dp))
4845 return;
4846
4847 /*
4848 	 * vdd might still be enabled due to the delayed vdd off.
4849 * Make sure vdd is actually turned off here.
4850 */
4851 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4852 pps_lock(intel_dp);
4853 edp_panel_vdd_off_sync(intel_dp);
4854 pps_unlock(intel_dp);
4855 }
4856
4857 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4858 {
4859 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4860 struct drm_device *dev = intel_dig_port->base.base.dev;
4861 struct drm_i915_private *dev_priv = dev->dev_private;
4862 enum intel_display_power_domain power_domain;
4863
4864 lockdep_assert_held(&dev_priv->pps_mutex);
4865
4866 if (!edp_have_panel_vdd(intel_dp))
4867 return;
4868
4869 /*
4870 * The VDD bit needs a power domain reference, so if the bit is
4871 * already enabled when we boot or resume, grab this reference and
4872 * schedule a vdd off, so we don't hold on to the reference
4873 * indefinitely.
4874 */
4875 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4876 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4877 intel_display_power_get(dev_priv, power_domain);
4878
4879 edp_panel_vdd_schedule_off(intel_dp);
4880 }
4881
4882 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4883 {
4884 struct intel_dp *intel_dp;
4885
4886 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4887 return;
4888
4889 intel_dp = enc_to_intel_dp(encoder);
4890
4891 pps_lock(intel_dp);
4892
4893 /*
4894 * Read out the current power sequencer assignment,
4895 * in case the BIOS did something with it.
4896 */
4897 if (IS_VALLEYVIEW(encoder->dev))
4898 vlv_initial_power_sequencer_setup(intel_dp);
4899
4900 intel_edp_panel_vdd_sanitize(intel_dp);
4901
4902 pps_unlock(intel_dp);
4903 }
4904
4905 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4906 .dpms = intel_connector_dpms,
4907 .detect = intel_dp_detect,
4908 .force = intel_dp_force,
4909 .fill_modes = drm_helper_probe_single_connector_modes,
4910 .set_property = intel_dp_set_property,
4911 .atomic_get_property = intel_connector_atomic_get_property,
4912 .destroy = intel_dp_connector_destroy,
4913 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4914 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4915 };
4916
4917 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4918 .get_modes = intel_dp_get_modes,
4919 .mode_valid = intel_dp_mode_valid,
4920 .best_encoder = intel_best_encoder,
4921 };
4922
4923 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4924 .reset = intel_dp_encoder_reset,
4925 .destroy = intel_dp_encoder_destroy,
4926 };
4927
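/*
 * HPD pulse handler. Long pulses re-probe the DPCD and MST capability,
 * dropping out of MST mode if the device has vanished; short pulses
 * service MST ESI events or, in SST mode, recheck the link status and
 * retrain if needed. Long pulses on eDP are ignored to avoid the
 * vdd-off -> long-hpd -> vdd-on cycle described below.
 */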
4928 enum irqreturn
4929 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4930 {
4931 struct intel_dp *intel_dp = &intel_dig_port->dp;
4932 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4933 struct drm_device *dev = intel_dig_port->base.base.dev;
4934 struct drm_i915_private *dev_priv = dev->dev_private;
4935 enum intel_display_power_domain power_domain;
4936 enum irqreturn ret = IRQ_NONE;
4937
4938 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4939 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4940
4941 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4942 /*
4943 * vdd off can generate a long pulse on eDP which
4944 * would require vdd on to handle it, and thus we
4945 * would end up in an endless cycle of
4946 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4947 */
4948 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4949 port_name(intel_dig_port->port));
4950 return IRQ_HANDLED;
4951 }
4952
4953 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4954 port_name(intel_dig_port->port),
4955 long_hpd ? "long" : "short");
4956
4957 power_domain = intel_display_port_power_domain(intel_encoder);
4958 intel_display_power_get(dev_priv, power_domain);
4959
4960 if (long_hpd) {
4961 /* indicate that we need to restart link training */
4962 intel_dp->train_set_valid = false;
4963
4964 if (HAS_PCH_SPLIT(dev)) {
4965 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4966 goto mst_fail;
4967 } else {
4968 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4969 goto mst_fail;
4970 }
4971
4972 if (!intel_dp_get_dpcd(intel_dp)) {
4973 goto mst_fail;
4974 }
4975
4976 intel_dp_probe_oui(intel_dp);
4977
4978 if (!intel_dp_probe_mst(intel_dp))
4979 goto mst_fail;
4980
4981 } else {
4982 if (intel_dp->is_mst) {
4983 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4984 goto mst_fail;
4985 }
4986
4987 if (!intel_dp->is_mst) {
4988 /*
4989 * we'll check the link status via the normal hot plug path later -
4990 * but for short hpds we should check it now
4991 */
4992 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4993 intel_dp_check_link_status(intel_dp);
4994 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4995 }
4996 }
4997
4998 ret = IRQ_HANDLED;
4999
5000 goto put_power;
5001 mst_fail:
5002 /* if we were in MST mode, and device is not there get out of MST mode */
5003 if (intel_dp->is_mst) {
5004 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5005 intel_dp->is_mst = false;
5006 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5007 }
5008 put_power:
5009 intel_display_power_put(dev_priv, power_domain);
5010
5011 return ret;
5012 }
5013
5014 /* Return which DP Port should be selected for Transcoder DP control */
5015 int
5016 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5017 {
5018 struct drm_device *dev = crtc->dev;
5019 struct intel_encoder *intel_encoder;
5020 struct intel_dp *intel_dp;
5021
5022 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5023 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5024
5025 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5026 intel_encoder->type == INTEL_OUTPUT_EDP)
5027 return intel_dp->output_reg;
5028 }
5029
5030 return -1;
5031 }
5032
5033 /* check the VBT to see whether the eDP is on DP-D port */
5034 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5035 {
5036 struct drm_i915_private *dev_priv = dev->dev_private;
5037 union child_device_config *p_child;
5038 int i;
5039 static const short port_mapping[] = {
5040 [PORT_B] = PORT_IDPB,
5041 [PORT_C] = PORT_IDPC,
5042 [PORT_D] = PORT_IDPD,
5043 };
5044
5045 if (port == PORT_A)
5046 return true;
5047
5048 if (!dev_priv->vbt.child_dev_num)
5049 return false;
5050
5051 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5052 p_child = dev_priv->vbt.child_dev + i;
5053
5054 if (p_child->common.dvo_port == port_mapping[port] &&
5055 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5056 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5057 return true;
5058 }
5059 return false;
5060 }
5061
5062 void
5063 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5064 {
5065 struct intel_connector *intel_connector = to_intel_connector(connector);
5066
5067 intel_attach_force_audio_property(connector);
5068 intel_attach_broadcast_rgb_property(connector);
5069 intel_dp->color_range_auto = true;
5070
5071 if (is_edp(intel_dp)) {
5072 drm_mode_create_scaling_mode_property(connector->dev);
5073 drm_object_attach_property(
5074 &connector->base,
5075 connector->dev->mode_config.scaling_mode_property,
5076 DRM_MODE_SCALE_ASPECT);
5077 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5078 }
5079 }
5080
5081 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5082 {
5083 intel_dp->last_power_cycle = jiffies;
5084 intel_dp->last_power_on = jiffies;
5085 intel_dp->last_backlight_off = jiffies;
5086 }
5087
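/*
 * Resolve the panel power sequencing delays: take the max of the
 * current register state and the VBT values, falling back to the eDP
 * spec limits when both are zero, and cache the result in pps_delays.
 */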
5088 static void
5089 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5090 struct intel_dp *intel_dp)
5091 {
5092 struct drm_i915_private *dev_priv = dev->dev_private;
5093 struct edp_power_seq cur, vbt, spec,
5094 *final = &intel_dp->pps_delays;
5095 u32 pp_on, pp_off, pp_div, pp;
5096 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5097
5098 lockdep_assert_held(&dev_priv->pps_mutex);
5099
5100 /* already initialized? */
5101 if (final->t11_t12 != 0)
5102 return;
5103
5104 if (HAS_PCH_SPLIT(dev)) {
5105 pp_ctrl_reg = PCH_PP_CONTROL;
5106 pp_on_reg = PCH_PP_ON_DELAYS;
5107 pp_off_reg = PCH_PP_OFF_DELAYS;
5108 pp_div_reg = PCH_PP_DIVISOR;
5109 } else {
5110 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5111
5112 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5113 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5114 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5115 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5116 }
5117
5118 /* Workaround: Need to write PP_CONTROL with the unlock key as
5119 * the very first thing. */
5120 pp = ironlake_get_pp_control(intel_dp);
5121 I915_WRITE(pp_ctrl_reg, pp);
5122
5123 pp_on = I915_READ(pp_on_reg);
5124 pp_off = I915_READ(pp_off_reg);
5125 pp_div = I915_READ(pp_div_reg);
5126
5127 /* Pull timing values out of registers */
5128 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5129 PANEL_POWER_UP_DELAY_SHIFT;
5130
5131 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5132 PANEL_LIGHT_ON_DELAY_SHIFT;
5133
5134 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5135 PANEL_LIGHT_OFF_DELAY_SHIFT;
5136
5137 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5138 PANEL_POWER_DOWN_DELAY_SHIFT;
5139
5140 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5141 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5142
5143 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5144 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5145
5146 vbt = dev_priv->vbt.edp_pps;
5147
5148 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5149 * our hw here, which are all in 100usec. */
5150 spec.t1_t3 = 210 * 10;
5151 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5152 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5153 spec.t10 = 500 * 10;
5154 /* This one is special and actually in units of 100ms, but zero
5155 * based in the hw (so we need to add 100 ms). But the sw vbt
5156 	 * table multiplies it by 1000 to express it in units of 100usec,
5157 * too. */
5158 spec.t11_t12 = (510 + 100) * 10;
5159
5160 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5161 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5162
5163 /* Use the max of the register settings and vbt. If both are
5164 * unset, fall back to the spec limits. */
5165 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5166 spec.field : \
5167 max(cur.field, vbt.field))
5168 assign_final(t1_t3);
5169 assign_final(t8);
5170 assign_final(t9);
5171 assign_final(t10);
5172 assign_final(t11_t12);
5173 #undef assign_final
5174
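	/*
	 * The resolved delays are in 100us units; round up to ms for the
	 * wait helpers, e.g. the spec t1_t3 of 2100 becomes a 210ms
	 * power-up delay.
	 */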
5175 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5176 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5177 intel_dp->backlight_on_delay = get_delay(t8);
5178 intel_dp->backlight_off_delay = get_delay(t9);
5179 intel_dp->panel_power_down_delay = get_delay(t10);
5180 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5181 #undef get_delay
5182
5183 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5184 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5185 intel_dp->panel_power_cycle_delay);
5186
5187 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5188 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5189 }
5190
5191 static void
5192 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5193 struct intel_dp *intel_dp)
5194 {
5195 struct drm_i915_private *dev_priv = dev->dev_private;
5196 u32 pp_on, pp_off, pp_div, port_sel = 0;
5197 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5198 int pp_on_reg, pp_off_reg, pp_div_reg;
5199 enum port port = dp_to_dig_port(intel_dp)->port;
5200 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5201
5202 lockdep_assert_held(&dev_priv->pps_mutex);
5203
5204 if (HAS_PCH_SPLIT(dev)) {
5205 pp_on_reg = PCH_PP_ON_DELAYS;
5206 pp_off_reg = PCH_PP_OFF_DELAYS;
5207 pp_div_reg = PCH_PP_DIVISOR;
5208 } else {
5209 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5210
5211 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5212 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5213 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5214 }
5215
5216 /*
5217 * And finally store the new values in the power sequencer. The
5218 * backlight delays are set to 1 because we do manual waits on them. For
5219 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5220 * we'll end up waiting for the backlight off delay twice: once when we
5221 * do the manual sleep, and once when we disable the panel and wait for
5222 * the PP_STATUS bit to become zero.
5223 */
5224 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5225 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5226 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5227 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5228 /* Compute the divisor for the pp clock, simply match the Bspec
5229 * formula. */
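	/* e.g. a typical 125MHz PCH rawclk (div = 125) gives (100 * 125) / 2 - 1 = 6249 */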
5230 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5231 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5232 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5233
5234 /* Haswell doesn't have any port selection bits for the panel
5235 * power sequencer any more. */
5236 if (IS_VALLEYVIEW(dev)) {
5237 port_sel = PANEL_PORT_SELECT_VLV(port);
5238 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5239 if (port == PORT_A)
5240 port_sel = PANEL_PORT_SELECT_DPA;
5241 else
5242 port_sel = PANEL_PORT_SELECT_DPD;
5243 }
5244
5245 pp_on |= port_sel;
5246
5247 I915_WRITE(pp_on_reg, pp_on);
5248 I915_WRITE(pp_off_reg, pp_off);
5249 I915_WRITE(pp_div_reg, pp_div);
5250
5251 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5252 I915_READ(pp_on_reg),
5253 I915_READ(pp_off_reg),
5254 I915_READ(pp_div_reg));
5255 }
5256
5257 /**
5258 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5259 * @dev: DRM device
5260 * @refresh_rate: RR to be programmed
5261 *
5262 * This function gets called when refresh rate (RR) has to be changed from
5263 * one frequency to another. Switches can be between high and low RR
5264 * supported by the panel or to any other RR based on media playback (in
5265 * this case, RR value needs to be passed from user space).
5266 *
5267 * The caller of this function needs to take a lock on dev_priv->drrs.
5268 */
5269 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5270 {
5271 struct drm_i915_private *dev_priv = dev->dev_private;
5272 struct intel_encoder *encoder;
5273 struct intel_digital_port *dig_port = NULL;
5274 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5275 struct intel_crtc_state *config = NULL;
5276 struct intel_crtc *intel_crtc = NULL;
5277 u32 reg, val;
5278 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5279
5280 if (refresh_rate <= 0) {
5281 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5282 return;
5283 }
5284
5285 if (intel_dp == NULL) {
5286 DRM_DEBUG_KMS("DRRS not supported.\n");
5287 return;
5288 }
5289
5290 /*
5291 * FIXME: This needs proper synchronization with psr state for some
5292 * platforms that cannot have PSR and DRRS enabled at the same time.
5293 */
5294
5295 dig_port = dp_to_dig_port(intel_dp);
5296 encoder = &dig_port->base;
5297 intel_crtc = to_intel_crtc(encoder->base.crtc);
5298
5299 if (!intel_crtc) {
5300 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5301 return;
5302 }
5303
5304 config = intel_crtc->config;
5305
5306 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5307 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5308 return;
5309 }
5310
5311 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5312 refresh_rate)
5313 index = DRRS_LOW_RR;
5314
5315 if (index == dev_priv->drrs.refresh_rate_type) {
5316 DRM_DEBUG_KMS(
5317 "DRRS requested for previously set RR...ignoring\n");
5318 return;
5319 }
5320
5321 if (!intel_crtc->active) {
5322 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5323 return;
5324 }
5325
5326 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5327 switch (index) {
5328 case DRRS_HIGH_RR:
5329 intel_dp_set_m_n(intel_crtc, M1_N1);
5330 break;
5331 case DRRS_LOW_RR:
5332 intel_dp_set_m_n(intel_crtc, M2_N2);
5333 break;
5334 case DRRS_MAX_RR:
5335 default:
5336 DRM_ERROR("Unsupported refreshrate type\n");
5337 }
5338 } else if (INTEL_INFO(dev)->gen > 6) {
5339 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5340 val = I915_READ(reg);
5341
5342 if (index > DRRS_HIGH_RR) {
5343 if (IS_VALLEYVIEW(dev))
5344 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5345 else
5346 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5347 } else {
5348 if (IS_VALLEYVIEW(dev))
5349 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5350 else
5351 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5352 }
5353 I915_WRITE(reg, val);
5354 }
5355
5356 dev_priv->drrs.refresh_rate_type = index;
5357
5358 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5359 }
5360
5361 /**
5362 * intel_edp_drrs_enable - init drrs struct if supported
5363 * @intel_dp: DP struct
5364 *
5365 * Initializes frontbuffer_bits and drrs.dp
5366 */
5367 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5368 {
5369 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5370 struct drm_i915_private *dev_priv = dev->dev_private;
5371 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5372 struct drm_crtc *crtc = dig_port->base.base.crtc;
5373 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5374
5375 if (!intel_crtc->config->has_drrs) {
5376 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5377 return;
5378 }
5379
5380 mutex_lock(&dev_priv->drrs.mutex);
5381 if (WARN_ON(dev_priv->drrs.dp)) {
5382 DRM_ERROR("DRRS already enabled\n");
5383 goto unlock;
5384 }
5385
5386 dev_priv->drrs.busy_frontbuffer_bits = 0;
5387
5388 dev_priv->drrs.dp = intel_dp;
5389
5390 unlock:
5391 mutex_unlock(&dev_priv->drrs.mutex);
5392 }
5393
5394 /**
5395 * intel_edp_drrs_disable - Disable DRRS
5396 * @intel_dp: DP struct
5397 * Switches back to the fixed refresh rate if needed and clears drrs.dp.
5398 */
5399 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5400 {
5401 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5402 struct drm_i915_private *dev_priv = dev->dev_private;
5403 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5404 struct drm_crtc *crtc = dig_port->base.base.crtc;
5405 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5406
5407 if (!intel_crtc->config->has_drrs)
5408 return;
5409
5410 mutex_lock(&dev_priv->drrs.mutex);
5411 if (!dev_priv->drrs.dp) {
5412 mutex_unlock(&dev_priv->drrs.mutex);
5413 return;
5414 }
5415
5416 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5417 intel_dp_set_drrs_state(dev_priv->dev,
5418 intel_dp->attached_connector->panel.
5419 fixed_mode->vrefresh);
5420
5421 dev_priv->drrs.dp = NULL;
5422 mutex_unlock(&dev_priv->drrs.mutex);
5423
5424 cancel_delayed_work_sync(&dev_priv->drrs.work);
5425 }
5426
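/*
 * Illustrative sketch, not part of the driver: the eDP modeset paths are
 * expected to bracket the active state of the pipe with the two helpers
 * above. "example_edp_pipe_update" is a hypothetical caller, not an
 * existing i915 function.
 */
static __maybe_unused void
example_edp_pipe_update(struct intel_dp *intel_dp, bool enable)
{
	if (enable)
		intel_edp_drrs_enable(intel_dp);	/* once the pipe is running */
	else
		intel_edp_drrs_disable(intel_dp);	/* before the pipe goes down */
}
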
5427 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5428 {
5429 struct drm_i915_private *dev_priv =
5430 container_of(work, typeof(*dev_priv), drrs.work.work);
5431 struct intel_dp *intel_dp;
5432
5433 mutex_lock(&dev_priv->drrs.mutex);
5434
5435 intel_dp = dev_priv->drrs.dp;
5436
5437 if (!intel_dp)
5438 goto unlock;
5439
5440 /*
5441 * The delayed work can race with an invalidate, hence we need to
5442 * recheck.
5443 */
5444
5445 if (dev_priv->drrs.busy_frontbuffer_bits)
5446 goto unlock;
5447
5448 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5449 intel_dp_set_drrs_state(dev_priv->dev,
5450 intel_dp->attached_connector->panel.
5451 downclock_mode->vrefresh);
5452
5453 unlock:
5454 mutex_unlock(&dev_priv->drrs.mutex);
5455 }
5456
5457 /**
5458 * intel_edp_drrs_invalidate - Invalidate DRRS
5459 * @dev: DRM device
5460 * @frontbuffer_bits: frontbuffer plane tracking bits
5461 *
5462 * When there is a disturbance on screen (due to cursor movement/time
5463 * update etc.), DRRS needs to be invalidated, i.e. the panel needs to
5464 * switch back to the high RR.
5465 *
5466 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5467 */
5468 void intel_edp_drrs_invalidate(struct drm_device *dev,
5469 unsigned frontbuffer_bits)
5470 {
5471 struct drm_i915_private *dev_priv = dev->dev_private;
5472 struct drm_crtc *crtc;
5473 enum pipe pipe;
5474
5475 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5476 return;
5477
5478 cancel_delayed_work(&dev_priv->drrs.work);
5479
5480 mutex_lock(&dev_priv->drrs.mutex);
5481 if (!dev_priv->drrs.dp) {
5482 mutex_unlock(&dev_priv->drrs.mutex);
5483 return;
5484 }
5485
5486 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5487 pipe = to_intel_crtc(crtc)->pipe;
5488
5489 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5490 intel_dp_set_drrs_state(dev_priv->dev,
5491 dev_priv->drrs.dp->attached_connector->panel.
5492 fixed_mode->vrefresh);
5493 }
5494
5495 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5496
5497 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5498 mutex_unlock(&dev_priv->drrs.mutex);
5499 }
5500
5501 /**
5502 * intel_edp_drrs_flush - Flush DRRS
5503 * @dev: DRM device
5504 * @frontbuffer_bits: frontbuffer plane tracking bits
5505 *
5506 * When there is no movement on screen, the DRRS work can be scheduled.
5507 * This work is responsible for programming the relevant registers after
5508 * a timeout of 1 second.
5509 *
5510 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5511 */
5512 void intel_edp_drrs_flush(struct drm_device *dev,
5513 unsigned frontbuffer_bits)
5514 {
5515 struct drm_i915_private *dev_priv = dev->dev_private;
5516 struct drm_crtc *crtc;
5517 enum pipe pipe;
5518
5519 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5520 return;
5521
5522 cancel_delayed_work(&dev_priv->drrs.work);
5523
5524 mutex_lock(&dev_priv->drrs.mutex);
5525 if (!dev_priv->drrs.dp) {
5526 mutex_unlock(&dev_priv->drrs.mutex);
5527 return;
5528 }
5529
5530 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5531 pipe = to_intel_crtc(crtc)->pipe;
5532 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5533
5534 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5535 !dev_priv->drrs.busy_frontbuffer_bits)
5536 schedule_delayed_work(&dev_priv->drrs.work,
5537 msecs_to_jiffies(1000));
5538 mutex_unlock(&dev_priv->drrs.mutex);
5539 }
5540
5541 /**
5542 * DOC: Display Refresh Rate Switching (DRRS)
5543 *
5544 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5545 * which enables switching between low and high refresh rates
5546 * dynamically, based on the usage scenario. This feature is applicable
5547 * to internal panels.
5548 *
5549 * Indication that the panel supports DRRS is given by the panel EDID, which
5550 * would list multiple refresh rates for one resolution.
5551 *
5552 * DRRS is of 2 types - static and seamless.
5553 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5554 * (may appear as a blink on screen) and is used in dock-undock scenario.
5555 * Seamless DRRS involves changing RR without any visual effect to the user
5556 * and can be used during normal system usage. This is done by programming
5557 * certain registers.
5558 *
5559 * Support for static/seamless DRRS may be indicated in the VBT based on
5560 * inputs from the panel spec.
5561 *
5562 * DRRS saves power by switching to low RR based on usage scenarios.
5563 *
5564 * eDP DRRS:
5565 * The implementation is based on frontbuffer tracking.
5566 * When there is a disturbance on the screen triggered by user activity or a
5567 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5568 * When there is no movement on screen, after a timeout of 1 second, a switch
5569 * to low RR is made.
5570 * For integration with frontbuffer tracking code,
5571 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5572 *
5573 * DRRS can be further extended to support other internal panels and also
5574 * the scenario of video playback wherein RR is set based on the rate
5575 * requested by userspace.
5576 */
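
/*
 * Illustrative sketch, not part of the driver: the frontbuffer tracking
 * code (intel_frontbuffer.c) is expected to report rendering activity
 * roughly like this. "example_frontbuffer_event" and its "flush"
 * parameter are assumptions made for the example.
 */
static __maybe_unused void
example_frontbuffer_event(struct drm_device *dev,
			  unsigned frontbuffer_bits, bool flush)
{
	if (flush)
		/* activity finished: allow the downclock work to rearm */
		intel_edp_drrs_flush(dev, frontbuffer_bits);
	else
		/* activity started: force a switch back to the high RR */
		intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}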
5577
5578 /**
5579 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5580 * @intel_connector: eDP connector
5581 * @fixed_mode: preferred mode of panel
5582 *
5583 * This function is called only once at driver load to initialize the
5584 * basic DRRS state (delayed work and mutex).
5585 *
5586 * Returns:
5587 * Downclock mode if the panel supports it, else NULL.
5588 * DRRS support is determined by the presence of downclock mode (apart
5589 * from VBT setting).
5590 */
5591 static struct drm_display_mode *
5592 intel_dp_drrs_init(struct intel_connector *intel_connector,
5593 struct drm_display_mode *fixed_mode)
5594 {
5595 struct drm_connector *connector = &intel_connector->base;
5596 struct drm_device *dev = connector->dev;
5597 struct drm_i915_private *dev_priv = dev->dev_private;
5598 struct drm_display_mode *downclock_mode = NULL;
5599
5600 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5601 mutex_init(&dev_priv->drrs.mutex);
5602
5603 if (INTEL_INFO(dev)->gen <= 6) {
5604 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5605 return NULL;
5606 }
5607
5608 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5609 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5610 return NULL;
5611 }
5612
5613 downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5614 connector);
5615
5616 if (!downclock_mode) {
5617 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5618 return NULL;
5619 }
5620
5621 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5622
5623 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5624 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5625 return downclock_mode;
5626 }
5627
5628 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5629 struct intel_connector *intel_connector)
5630 {
5631 struct drm_connector *connector = &intel_connector->base;
5632 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5633 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5634 struct drm_device *dev = intel_encoder->base.dev;
5635 struct drm_i915_private *dev_priv = dev->dev_private;
5636 struct drm_display_mode *fixed_mode = NULL;
5637 struct drm_display_mode *downclock_mode = NULL;
5638 bool has_dpcd;
5639 struct drm_display_mode *scan;
5640 struct edid *edid;
5641 enum pipe pipe = INVALID_PIPE;
5642
5643 if (!is_edp(intel_dp))
5644 return true;
5645
5646 pps_lock(intel_dp);
5647 intel_edp_panel_vdd_sanitize(intel_dp);
5648 pps_unlock(intel_dp);
5649
5650 /* Cache DPCD and EDID for eDP. */
5651 has_dpcd = intel_dp_get_dpcd(intel_dp);
5652
5653 if (has_dpcd) {
5654 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5655 dev_priv->no_aux_handshake =
5656 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5657 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5658 } else {
5659 /* if this fails, presume the device is a ghost */
5660 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5661 return false;
5662 }
5663
5664 /* We now know it's not a ghost, init power sequence regs. */
5665 pps_lock(intel_dp);
5666 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5667 pps_unlock(intel_dp);
5668
5669 mutex_lock(&dev->mode_config.mutex);
5670 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5671 if (edid) {
5672 if (drm_add_edid_modes(connector, edid)) {
5673 drm_mode_connector_update_edid_property(connector,
5674 edid);
5675 drm_edid_to_eld(connector, edid);
5676 } else {
5677 kfree(edid);
5678 edid = ERR_PTR(-EINVAL);
5679 }
5680 } else {
5681 edid = ERR_PTR(-ENOENT);
5682 }
5683 intel_connector->edid = edid;
5684
5685 /* prefer fixed mode from EDID if available */
5686 list_for_each_entry(scan, &connector->probed_modes, head) {
5687 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5688 fixed_mode = drm_mode_duplicate(dev, scan);
5689 downclock_mode = intel_dp_drrs_init(
5690 intel_connector, fixed_mode);
5691 break;
5692 }
5693 }
5694
5695 /* fallback to VBT if available for eDP */
5696 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5697 fixed_mode = drm_mode_duplicate(dev,
5698 dev_priv->vbt.lfp_lvds_vbt_mode);
5699 if (fixed_mode)
5700 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5701 }
5702 mutex_unlock(&dev->mode_config.mutex);
5703
5704 if (IS_VALLEYVIEW(dev)) {
5705 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5706 register_reboot_notifier(&intel_dp->edp_notifier);
5707
5708 /*
5709 * Figure out the current pipe for the initial backlight setup.
5710 * If the current pipe isn't valid, try the PPS pipe, and if that
5711 * fails just assume pipe A.
5712 */
5713 if (IS_CHERRYVIEW(dev))
5714 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5715 else
5716 pipe = PORT_TO_PIPE(intel_dp->DP);
5717
5718 if (pipe != PIPE_A && pipe != PIPE_B)
5719 pipe = intel_dp->pps_pipe;
5720
5721 if (pipe != PIPE_A && pipe != PIPE_B)
5722 pipe = PIPE_A;
5723
5724 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5725 pipe_name(pipe));
5726 }
5727
5728 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5729 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5730 intel_panel_setup_backlight(connector, pipe);
5731
5732 return true;
5733 }
5734
5735 bool
5736 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5737 struct intel_connector *intel_connector)
5738 {
5739 struct drm_connector *connector = &intel_connector->base;
5740 struct intel_dp *intel_dp = &intel_dig_port->dp;
5741 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5742 struct drm_device *dev = intel_encoder->base.dev;
5743 struct drm_i915_private *dev_priv = dev->dev_private;
5744 enum port port = intel_dig_port->port;
5745 int type;
5746
5747 intel_dp->pps_pipe = INVALID_PIPE;
5748
5749 /* intel_dp vfuncs */
5750 if (INTEL_INFO(dev)->gen >= 9)
5751 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5752 else if (IS_VALLEYVIEW(dev))
5753 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5754 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5755 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5756 else if (HAS_PCH_SPLIT(dev))
5757 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5758 else
5759 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5760
5761 if (INTEL_INFO(dev)->gen >= 9)
5762 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5763 else
5764 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5765
5766 /* Preserve the current hw state. */
5767 intel_dp->DP = I915_READ(intel_dp->output_reg);
5768 intel_dp->attached_connector = intel_connector;
5769
5770 if (intel_dp_is_edp(dev, port))
5771 type = DRM_MODE_CONNECTOR_eDP;
5772 else
5773 type = DRM_MODE_CONNECTOR_DisplayPort;
5774
5775 /*
5776 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5777 * for DP the encoder type can be set by the caller to
5778 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5779 */
5780 if (type == DRM_MODE_CONNECTOR_eDP)
5781 intel_encoder->type = INTEL_OUTPUT_EDP;
5782
5783 /* eDP only on port B and/or C on vlv/chv */
5784 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5785 port != PORT_B && port != PORT_C))
5786 return false;
5787
5788 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5789 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5790 port_name(port));
5791
5792 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5793 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5794
5795 connector->interlace_allowed = true;
5796 connector->doublescan_allowed = 0;
5797
5798 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5799 edp_panel_vdd_work);
5800
5801 intel_connector_attach_encoder(intel_connector, intel_encoder);
5802 drm_connector_register(connector);
5803
5804 if (HAS_DDI(dev))
5805 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5806 else
5807 intel_connector->get_hw_state = intel_connector_get_hw_state;
5808 intel_connector->unregister = intel_dp_connector_unregister;
5809
5810 /* Set up the hotplug pin. */
5811 switch (port) {
5812 case PORT_A:
5813 intel_encoder->hpd_pin = HPD_PORT_A;
5814 break;
5815 case PORT_B:
5816 intel_encoder->hpd_pin = HPD_PORT_B;
5817 break;
5818 case PORT_C:
5819 intel_encoder->hpd_pin = HPD_PORT_C;
5820 break;
5821 case PORT_D:
5822 intel_encoder->hpd_pin = HPD_PORT_D;
5823 break;
5824 default:
5825 BUG();
5826 }
5827
5828 if (is_edp(intel_dp)) {
5829 pps_lock(intel_dp);
5830 intel_dp_init_panel_power_timestamps(intel_dp);
5831 if (IS_VALLEYVIEW(dev))
5832 vlv_initial_power_sequencer_setup(intel_dp);
5833 else
5834 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5835 pps_unlock(intel_dp);
5836 }
5837
5838 intel_dp_aux_init(intel_dp, intel_connector);
5839
5840 /* init MST on ports that can support it */
5841 if (HAS_DP_MST(dev) &&
5842 (port == PORT_B || port == PORT_C || port == PORT_D))
5843 intel_dp_mst_encoder_init(intel_dig_port,
5844 intel_connector->base.base.id);
5845
5846 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5847 drm_dp_aux_unregister(&intel_dp->aux);
5848 if (is_edp(intel_dp)) {
5849 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5850 /*
5851 * vdd might still be enabled due to the delayed vdd off.
5852 * Make sure vdd is actually turned off here.
5853 */
5854 pps_lock(intel_dp);
5855 edp_panel_vdd_off_sync(intel_dp);
5856 pps_unlock(intel_dp);
5857 }
5858 drm_connector_unregister(connector);
5859 drm_connector_cleanup(connector);
5860 return false;
5861 }
5862
5863 intel_dp_add_properties(intel_dp, connector);
5864
5865 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5866 * 0xd. Failure to do so will result in spurious interrupts being
5867 * generated on the port when a cable is not attached.
5868 */
5869 if (IS_G4X(dev) && !IS_GM45(dev)) {
5870 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5871 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5872 }
5873
5874 i915_debugfs_connector_add(connector);
5875
5876 return true;
5877 }
5878
5879 void
5880 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5881 {
5882 struct drm_i915_private *dev_priv = dev->dev_private;
5883 struct intel_digital_port *intel_dig_port;
5884 struct intel_encoder *intel_encoder;
5885 struct drm_encoder *encoder;
5886 struct intel_connector *intel_connector;
5887
5888 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5889 if (!intel_dig_port)
5890 return;
5891
5892 intel_connector = intel_connector_alloc();
5893 if (!intel_connector) {
5894 kfree(intel_dig_port);
5895 return;
5896 }
5897
5898 intel_encoder = &intel_dig_port->base;
5899 encoder = &intel_encoder->base;
5900
5901 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5902 DRM_MODE_ENCODER_TMDS);
5903
5904 intel_encoder->compute_config = intel_dp_compute_config;
5905 intel_encoder->disable = intel_disable_dp;
5906 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5907 intel_encoder->get_config = intel_dp_get_config;
5908 intel_encoder->suspend = intel_dp_encoder_suspend;
5909 if (IS_CHERRYVIEW(dev)) {
5910 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5911 intel_encoder->pre_enable = chv_pre_enable_dp;
5912 intel_encoder->enable = vlv_enable_dp;
5913 intel_encoder->post_disable = chv_post_disable_dp;
5914 } else if (IS_VALLEYVIEW(dev)) {
5915 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5916 intel_encoder->pre_enable = vlv_pre_enable_dp;
5917 intel_encoder->enable = vlv_enable_dp;
5918 intel_encoder->post_disable = vlv_post_disable_dp;
5919 } else {
5920 intel_encoder->pre_enable = g4x_pre_enable_dp;
5921 intel_encoder->enable = g4x_enable_dp;
5922 if (INTEL_INFO(dev)->gen >= 5)
5923 intel_encoder->post_disable = ilk_post_disable_dp;
5924 }
5925
5926 intel_dig_port->port = port;
5927 intel_dig_port->dp.output_reg = output_reg;
5928
5929 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5930 if (IS_CHERRYVIEW(dev)) {
5931 if (port == PORT_D)
5932 intel_encoder->crtc_mask = 1 << 2;
5933 else
5934 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5935 } else {
5936 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5937 }
5938 intel_encoder->cloneable = 0;
5939
5940 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5941 dev_priv->hotplug.irq_port[port] = intel_dig_port;
5942
5943 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5944 drm_encoder_cleanup(encoder);
5945 kfree(intel_dig_port);
5946 kfree(intel_connector);
5947 }
5948 }
5949
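/*
 * Illustrative sketch, not part of this file: platform output setup (in
 * intel_display.c) is expected to probe the detect/strap bit and register
 * each DP port roughly like this; register names vary by platform:
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, DP_B, PORT_B);
 */
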
5950 void intel_dp_mst_suspend(struct drm_device *dev)
5951 {
5952 struct drm_i915_private *dev_priv = dev->dev_private;
5953 int i;
5954
5955 /* disable MST */
5956 for (i = 0; i < I915_MAX_PORTS; i++) {
5957 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5958 if (!intel_dig_port)
5959 continue;
5960
5961 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5962 if (!intel_dig_port->dp.can_mst)
5963 continue;
5964 if (intel_dig_port->dp.is_mst)
5965 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5966 }
5967 }
5968 }
5969
5970 void intel_dp_mst_resume(struct drm_device *dev)
5971 {
5972 struct drm_i915_private *dev_priv = dev->dev_private;
5973 int i;
5974
5975 for (i = 0; i < I915_MAX_PORTS; i++) {
5976 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5977 if (!intel_dig_port)
5978 continue;
5979 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5980 int ret;
5981
5982 if (!intel_dig_port->dp.can_mst)
5983 continue;
5984
5985 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5986 if (ret != 0)
5987 intel_dp_check_mst_status(&intel_dig_port->dp);
5989 }
5990 }
5991 }
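
/*
 * Illustrative sketch, not part of this file: the system suspend/resume
 * paths (in i915_drv.c) are expected to bracket the MST topology state,
 * suspending before the pipes are shut down and resuming once the display
 * hardware is back up:
 *
 *	intel_dp_mst_suspend(dev);
 *	...
 *	intel_dp_mst_resume(dev);
 */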