drm/i915/skl: Buffer translation improvements
drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we provide only the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
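
/*
 * Worked example of the fixed-point m2 encoding above (an illustrative
 * sanity check, not used by the driver): for DP_LINK_BW_1_62,
 *
 *   (m2_int << 22) | m2_fraction = (32 << 22) | 1677722
 *                                = 0x8000000 | 0x19999a
 *                                = 0x819999a
 *
 * which matches the .m2 value in that table entry.
 */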

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
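
/*
 * A further worked example in the same units (illustrative values): a
 * 1920x1080@60 mode has ->clock == 148500 kHz, so at 24bpp
 * intel_dp_link_required() gives 148500 * 24 / 10 == 356400 decakilobits.
 * A 1.62GHz link with 4 lanes provides intel_dp_max_data_rate() ==
 * 162000 * 4 * 8 / 10 == 518400, so that mode fits even on the lowest
 * link rate at full lane width.
 */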

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
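
/*
 * Illustrative round trip through the two helpers above (values assumed):
 * packing the three bytes { 0x12, 0x34, 0x56 } yields
 *
 *   0x12 << 24 | 0x34 << 16 | 0x56 << 8 == 0x12345600
 *
 * i.e. bytes are placed MSB-first into the 32-bit register word, and
 * intel_dp_unpack_aux(0x12345600, dst, 3) recovers the same three bytes.
 */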

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
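
/*
 * A short example of the bitmask logic above (illustrative, assumed
 * state): if the other eDP port already owns the pipe A sequencer, the
 * loop clears bit PIPE_A and leaves pipes == (1 << PIPE_B) == 0x2, so
 * ffs(0x2) - 1 == 1 == PIPE_B is picked for this port.
 */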

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
 * This function is only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and the AUX clock
	 * should run at 2MHz. So take the hrawclk value, divide by 2, and
	 * use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
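
/*
 * Worked example for the divider above (illustrative): with an hrawclk of
 * 200 MHz the divider becomes 200 / 2 == 100. The value feeds the
 * DP_AUX_CH_CTL_BIT_CLOCK_2X field in the send-ctl helpers below, where it
 * divides hrawclk down to a ~2MHz 2x bit clock (AUX itself runs at 1Mbps,
 * Manchester encoded).
 */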

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware
	 * derives the clock from CDCLK automatically). We still implement
	 * the get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
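
/*
 * For reference, a sketch of the 4-byte AUX header assembled below (values
 * assumed for illustration): a native read of 16 bytes of DPCD starting at
 * address 0x000000 uses msg->request == DP_AUX_NATIVE_READ (0x9) and
 * msg->size == 16, so
 *
 *   txbuf[0] = (0x9 << 4) | 0x0 = 0x90    (request + address bits 19:16)
 *   txbuf[1] = 0x00                       (address bits 15:8)
 *   txbuf[2] = 0x00                       (address bits 7:0)
 *   txbuf[3] = 16 - 1 = 0x0f              (length - 1)
 */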
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640,
	 * which results in a CDCLK change. The CDCLK change needs to be
	 * handled by disabling pipes and re-enabling them. */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
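
/*
 * Note on the link_clock / 2 in the switch above (our reading, stated as
 * an assumption): the case labels are half the link rate because the DPLL
 * runs at half the bit rate, with data clocked on both edges. For example
 * a 540000 (5.4 GHz) link gives 540000 / 2 == 270000 and thus selects
 * DPLL_CTRL1_LINK_RATE_2700, i.e. a 2.7 GHz PLL frequency.
 */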

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
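
/*
 * The (bw >> 3) + 1 trick above works because the DPCD link-bw codes are
 * 0x06 (1.62 GHz), 0x0a (2.7 GHz) and 0x14 (5.4 GHz): shifting right by 3
 * gives 0, 1 and 2 respectively - the index of that rate in
 * default_rates[] - so adding 1 yields the number of usable entries.
 */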

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
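
/*
 * intersect_rates() is a standard sorted-merge intersection; both inputs
 * must be ascending. An assumed example: source { 162000, 270000, 540000 }
 * and sink { 162000, 216000, 270000 } produce common { 162000, 270000 }
 * and a return value of 2.
 */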

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
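
/*
 * Example output of snprintf_int_array() (illustrative): given the
 * default_rates array it fills str with "162000, 270000, 540000",
 * truncating silently once the buffer would overflow.
 */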

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
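
/*
 * The rate_to_index(0, rates) call above leans on rates[] being
 * zero-initialized: searching for 0 returns the index of the first unused
 * slot, i.e. the number of valid entries, so indexing with that value
 * minus one picks the last - and, since common rates are sorted
 * ascending, the highest - common link rate.
 */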

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config, 0);
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
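
/*
 * A worked pass through the search loop above (illustrative numbers): for
 * a 148500 kHz mode at bpp == 24, mode_rate == 148500 * 24 / 10 == 356400.
 * On a non-eDP link the search starts at the lowest common rate and one
 * lane: 162000 with 1 lane offers 162000 * 1 * 8 / 10 == 129600, too
 * little; 2 lanes offer 259200, still too little; 4 lanes offer 518400,
 * so the loop stops there - the lowest link rate that fits wins before
 * higher rates are ever tried.
 */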

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}


static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));
1964
1965 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1966 port_name(dp_to_dig_port(intel_dp)->port));
1967
1968 pp = ironlake_get_pp_control(intel_dp);
1969 /* We need to switch off panel power _and_ force vdd, because otherwise
1970 * some panels get very unhappy and cease to work. */
1971 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1972 EDP_BLC_ENABLE);
1973
1974 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1975
1976 intel_dp->want_panel_vdd = false;
1977
1978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
1980
1981 intel_dp->last_power_cycle = jiffies;
1982 wait_panel_off(intel_dp);
1983
1984 /* We got a reference when we enabled the VDD. */
1985 power_domain = intel_display_port_power_domain(intel_encoder);
1986 intel_display_power_put(dev_priv, power_domain);
1987 }
1988
1989 void intel_edp_panel_off(struct intel_dp *intel_dp)
1990 {
1991 if (!is_edp(intel_dp))
1992 return;
1993
1994 pps_lock(intel_dp);
1995 edp_panel_off(intel_dp);
1996 pps_unlock(intel_dp);
1997 }
1998
1999 /* Enable backlight in the panel power control. */
2000 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2001 {
2002 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2003 struct drm_device *dev = intel_dig_port->base.base.dev;
2004 struct drm_i915_private *dev_priv = dev->dev_private;
2005 u32 pp;
2006 u32 pp_ctrl_reg;
2007
2008 /*
2009 * If we enable the backlight right away following a panel power
2010 * on, we may see slight flicker as the panel syncs with the eDP
2011 * link. So delay a bit to make sure the image is solid before
2012 * allowing it to appear.
2013 */
2014 wait_backlight_on(intel_dp);
2015
2016 pps_lock(intel_dp);
2017
2018 pp = ironlake_get_pp_control(intel_dp);
2019 pp |= EDP_BLC_ENABLE;
2020
2021 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2022
2023 I915_WRITE(pp_ctrl_reg, pp);
2024 POSTING_READ(pp_ctrl_reg);
2025
2026 pps_unlock(intel_dp);
2027 }
2028
2029 /* Enable backlight PWM and backlight PP control. */
2030 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2031 {
2032 if (!is_edp(intel_dp))
2033 return;
2034
2035 DRM_DEBUG_KMS("\n");
2036
2037 intel_panel_enable_backlight(intel_dp->attached_connector);
2038 _intel_edp_backlight_on(intel_dp);
2039 }
2040
2041 /* Disable backlight in the panel power control. */
2042 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2043 {
2044 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2045 struct drm_i915_private *dev_priv = dev->dev_private;
2046 u32 pp;
2047 u32 pp_ctrl_reg;
2048
2049 if (!is_edp(intel_dp))
2050 return;
2051
2052 pps_lock(intel_dp);
2053
2054 pp = ironlake_get_pp_control(intel_dp);
2055 pp &= ~EDP_BLC_ENABLE;
2056
2057 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2058
2059 I915_WRITE(pp_ctrl_reg, pp);
2060 POSTING_READ(pp_ctrl_reg);
2061
2062 pps_unlock(intel_dp);
2063
2064 intel_dp->last_backlight_off = jiffies;
2065 edp_wait_backlight_off(intel_dp);
2066 }
2067
2068 /* Disable backlight PP control and backlight PWM. */
2069 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2070 {
2071 if (!is_edp(intel_dp))
2072 return;
2073
2074 DRM_DEBUG_KMS("\n");
2075
2076 _intel_edp_backlight_off(intel_dp);
2077 intel_panel_disable_backlight(intel_dp->attached_connector);
2078 }
2079
2080 /*
2081 * Hook for controlling the panel power control backlight through the bl_power
2082 * sysfs attribute. Take care to handle multiple calls.
2083 */
2084 static void intel_edp_backlight_power(struct intel_connector *connector,
2085 bool enable)
2086 {
2087 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2088 bool is_enabled;
2089
2090 pps_lock(intel_dp);
2091 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2092 pps_unlock(intel_dp);
2093
2094 if (is_enabled == enable)
2095 return;
2096
2097 DRM_DEBUG_KMS("panel power control backlight %s\n",
2098 enable ? "enable" : "disable");
2099
2100 if (enable)
2101 _intel_edp_backlight_on(intel_dp);
2102 else
2103 _intel_edp_backlight_off(intel_dp);
2104 }
2105
2106 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2107 {
2108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2109 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2110 struct drm_device *dev = crtc->dev;
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112 u32 dpa_ctl;
2113
2114 assert_pipe_disabled(dev_priv,
2115 to_intel_crtc(crtc)->pipe);
2116
2117 DRM_DEBUG_KMS("\n");
2118 dpa_ctl = I915_READ(DP_A);
2119 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2120 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2121
2122 /* We don't adjust intel_dp->DP while tearing down the link, to
2123 * facilitate link retraining (e.g. after hotplug). Hence clear all
2124 * enable bits here to ensure that we don't enable too much. */
2125 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2126 intel_dp->DP |= DP_PLL_ENABLE;
2127 I915_WRITE(DP_A, intel_dp->DP);
2128 POSTING_READ(DP_A);
2129 udelay(200);
2130 }
2131
2132 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2133 {
2134 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2135 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2136 struct drm_device *dev = crtc->dev;
2137 struct drm_i915_private *dev_priv = dev->dev_private;
2138 u32 dpa_ctl;
2139
2140 assert_pipe_disabled(dev_priv,
2141 to_intel_crtc(crtc)->pipe);
2142
2143 dpa_ctl = I915_READ(DP_A);
2144 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2145 "dp pll off, should be on\n");
2146 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2147
2148 /* We can't rely on the value tracked for the DP register in
2149 * intel_dp->DP because link_down must not change that (otherwise link
2150 * re-training will fail). */
2151 dpa_ctl &= ~DP_PLL_ENABLE;
2152 I915_WRITE(DP_A, dpa_ctl);
2153 POSTING_READ(DP_A);
2154 udelay(200);
2155 }
2156
2157 /* If the sink supports it, try to set the power state appropriately */
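/* DP_SET_POWER lives at DPCD address 0x600 and is only defined from
 * DPCD 1.1 onwards, hence the revision check below. */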
2158 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2159 {
2160 int ret, i;
2161
2162 /* Should have a valid DPCD by this point */
2163 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2164 return;
2165
2166 if (mode != DRM_MODE_DPMS_ON) {
2167 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2168 DP_SET_POWER_D3);
2169 } else {
2170 /*
2171 * When turning on, retry the write a few times, sleeping 1 ms
2172 * between attempts, to give the sink time to wake up.
2173 */
2174 for (i = 0; i < 3; i++) {
2175 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2176 DP_SET_POWER_D0);
2177 if (ret == 1)
2178 break;
2179 msleep(1);
2180 }
2181 }
2182
2183 if (ret != 1)
2184 DRM_DEBUG_KMS("failed to %s sink power state\n",
2185 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2186 }
2187
2188 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2189 enum pipe *pipe)
2190 {
2191 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2192 enum port port = dp_to_dig_port(intel_dp)->port;
2193 struct drm_device *dev = encoder->base.dev;
2194 struct drm_i915_private *dev_priv = dev->dev_private;
2195 enum intel_display_power_domain power_domain;
2196 u32 tmp;
2197
2198 power_domain = intel_display_port_power_domain(encoder);
2199 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2200 return false;
2201
2202 tmp = I915_READ(intel_dp->output_reg);
2203
2204 if (!(tmp & DP_PORT_EN))
2205 return false;
2206
2207 if (IS_GEN7(dev) && port == PORT_A) {
2208 *pipe = PORT_TO_PIPE_CPT(tmp);
2209 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2210 enum pipe p;
2211
2212 for_each_pipe(dev_priv, p) {
2213 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2214 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2215 *pipe = p;
2216 return true;
2217 }
2218 }
2219
2220 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2221 intel_dp->output_reg);
2222 } else if (IS_CHERRYVIEW(dev)) {
2223 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2224 } else {
2225 *pipe = PORT_TO_PIPE(tmp);
2226 }
2227
2228 return true;
2229 }
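/*
 * A sketch of how the hook above is consumed during hardware state
 * readout (modelled on intel_display.c, not copied from it):
 *
 *	enum pipe pipe;
 *
 *	if (encoder->get_hw_state(encoder, &pipe))
 *		DRM_DEBUG_KMS("encoder active on pipe %c\n",
 *			      pipe_name(pipe));
 */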
2230
2231 static void intel_dp_get_config(struct intel_encoder *encoder,
2232 struct intel_crtc_state *pipe_config)
2233 {
2234 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2235 u32 tmp, flags = 0;
2236 struct drm_device *dev = encoder->base.dev;
2237 struct drm_i915_private *dev_priv = dev->dev_private;
2238 enum port port = dp_to_dig_port(intel_dp)->port;
2239 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2240 int dotclock;
2241
2242 tmp = I915_READ(intel_dp->output_reg);
2243
2244 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2245
2246 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2247 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2248 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2249 flags |= DRM_MODE_FLAG_PHSYNC;
2250 else
2251 flags |= DRM_MODE_FLAG_NHSYNC;
2252
2253 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2254 flags |= DRM_MODE_FLAG_PVSYNC;
2255 else
2256 flags |= DRM_MODE_FLAG_NVSYNC;
2257 } else {
2258 if (tmp & DP_SYNC_HS_HIGH)
2259 flags |= DRM_MODE_FLAG_PHSYNC;
2260 else
2261 flags |= DRM_MODE_FLAG_NHSYNC;
2262
2263 if (tmp & DP_SYNC_VS_HIGH)
2264 flags |= DRM_MODE_FLAG_PVSYNC;
2265 else
2266 flags |= DRM_MODE_FLAG_NVSYNC;
2267 }
2268
2269 pipe_config->base.adjusted_mode.flags |= flags;
2270
2271 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2272 tmp & DP_COLOR_RANGE_16_235)
2273 pipe_config->limited_color_range = true;
2274
2275 pipe_config->has_dp_encoder = true;
2276
2277 intel_dp_get_m_n(crtc, pipe_config);
2278
2279 if (port == PORT_A) {
2280 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2281 pipe_config->port_clock = 162000;
2282 else
2283 pipe_config->port_clock = 270000;
2284 }
2285
2286 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2287 &pipe_config->dp_m_n);
2288
2289 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2290 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2291
2292 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2293
2294 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2295 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2296 /*
2297 * This is a big fat ugly hack.
2298 *
2299 * Some machines in UEFI boot mode provide us a VBT that has 18
2300 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2301 * unknown we fail to light up. Yet the same BIOS boots up with
2302 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2303 * max, not what it tells us to use.
2304 *
2305 * Note: This will still be broken if the eDP panel is not lit
2306 * up by the BIOS, and thus we can't get the mode at module
2307 * load.
2308 */
2309 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2310 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2311 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2312 }
2313 }
2314
2315 static void intel_disable_dp(struct intel_encoder *encoder)
2316 {
2317 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2318 struct drm_device *dev = encoder->base.dev;
2319 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2320
2321 if (crtc->config->has_audio)
2322 intel_audio_codec_disable(encoder);
2323
2324 if (HAS_PSR(dev) && !HAS_DDI(dev))
2325 intel_psr_disable(intel_dp);
2326
2327 /* Make sure the panel is off before trying to change the mode. But also
2328 * ensure that we have vdd while we switch off the panel. */
2329 intel_edp_panel_vdd_on(intel_dp);
2330 intel_edp_backlight_off(intel_dp);
2331 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2332 intel_edp_panel_off(intel_dp);
2333
2334 /* disable the port before the pipe on g4x */
2335 if (INTEL_INFO(dev)->gen < 5)
2336 intel_dp_link_down(intel_dp);
2337 }
2338
2339 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2340 {
2341 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2342 enum port port = dp_to_dig_port(intel_dp)->port;
2343
2344 intel_dp_link_down(intel_dp);
2345 if (port == PORT_A)
2346 ironlake_edp_pll_off(intel_dp);
2347 }
2348
2349 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2350 {
2351 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2352
2353 intel_dp_link_down(intel_dp);
2354 }
2355
2356 static void chv_post_disable_dp(struct intel_encoder *encoder)
2357 {
2358 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2359 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2360 struct drm_device *dev = encoder->base.dev;
2361 struct drm_i915_private *dev_priv = dev->dev_private;
2362 struct intel_crtc *intel_crtc =
2363 to_intel_crtc(encoder->base.crtc);
2364 enum dpio_channel ch = vlv_dport_to_channel(dport);
2365 enum pipe pipe = intel_crtc->pipe;
2366 u32 val;
2367
2368 intel_dp_link_down(intel_dp);
2369
2370 mutex_lock(&dev_priv->sb_lock);
2371
2372 /* Propagate soft reset to data lane reset */
2373 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2374 val |= CHV_PCS_REQ_SOFTRESET_EN;
2375 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2376
2377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2378 val |= CHV_PCS_REQ_SOFTRESET_EN;
2379 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2380
2381 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2382 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2383 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2384
2385 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2386 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2387 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2388
2389 mutex_unlock(&dev_priv->sb_lock);
2390 }
2391
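/*
 * Three register layouts are handled below: DDI platforms program the
 * training pattern via DP_TP_CTL, gen7 port A and CPT/PPT PCH ports use
 * the _CPT link-train bits of the DP port register, and everything else
 * (g4x, VLV, CHV) uses the legacy bits in that same register.
 */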
2392 static void
2393 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2394 uint32_t *DP,
2395 uint8_t dp_train_pat)
2396 {
2397 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2398 struct drm_device *dev = intel_dig_port->base.base.dev;
2399 struct drm_i915_private *dev_priv = dev->dev_private;
2400 enum port port = intel_dig_port->port;
2401
2402 if (HAS_DDI(dev)) {
2403 uint32_t temp = I915_READ(DP_TP_CTL(port));
2404
2405 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2406 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2407 else
2408 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2409
2410 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2411 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2412 case DP_TRAINING_PATTERN_DISABLE:
2413 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2414
2415 break;
2416 case DP_TRAINING_PATTERN_1:
2417 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2418 break;
2419 case DP_TRAINING_PATTERN_2:
2420 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2421 break;
2422 case DP_TRAINING_PATTERN_3:
2423 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2424 break;
2425 }
2426 I915_WRITE(DP_TP_CTL(port), temp);
2427
2428 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2429 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2430 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2431
2432 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2433 case DP_TRAINING_PATTERN_DISABLE:
2434 *DP |= DP_LINK_TRAIN_OFF_CPT;
2435 break;
2436 case DP_TRAINING_PATTERN_1:
2437 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2438 break;
2439 case DP_TRAINING_PATTERN_2:
2440 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2441 break;
2442 case DP_TRAINING_PATTERN_3:
2443 DRM_ERROR("DP training pattern 3 not supported\n");
2444 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2445 break;
2446 }
2447
2448 } else {
2449 if (IS_CHERRYVIEW(dev))
2450 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2451 else
2452 *DP &= ~DP_LINK_TRAIN_MASK;
2453
2454 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2455 case DP_TRAINING_PATTERN_DISABLE:
2456 *DP |= DP_LINK_TRAIN_OFF;
2457 break;
2458 case DP_TRAINING_PATTERN_1:
2459 *DP |= DP_LINK_TRAIN_PAT_1;
2460 break;
2461 case DP_TRAINING_PATTERN_2:
2462 *DP |= DP_LINK_TRAIN_PAT_2;
2463 break;
2464 case DP_TRAINING_PATTERN_3:
2465 if (IS_CHERRYVIEW(dev)) {
2466 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2467 } else {
2468 DRM_ERROR("DP training pattern 3 not supported\n");
2469 *DP |= DP_LINK_TRAIN_PAT_2;
2470 }
2471 break;
2472 }
2473 }
2474 }
2475
2476 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2477 {
2478 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2479 struct drm_i915_private *dev_priv = dev->dev_private;
2480
2481 /* enable with pattern 1 (as per spec) */
2482 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2483 DP_TRAINING_PATTERN_1);
2484
2485 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2486 POSTING_READ(intel_dp->output_reg);
2487
2488 /*
2489 * Magic for VLV/CHV. We _must_ first set up the register
2490 * without actually enabling the port, and then do another
2491 * write to enable the port. Otherwise link training will
2492 * fail when the power sequencer is freshly used for this port.
2493 */
2494 intel_dp->DP |= DP_PORT_EN;
2495
2496 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2497 POSTING_READ(intel_dp->output_reg);
2498 }
2499
2500 static void intel_enable_dp(struct intel_encoder *encoder)
2501 {
2502 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2503 struct drm_device *dev = encoder->base.dev;
2504 struct drm_i915_private *dev_priv = dev->dev_private;
2505 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2506 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2507 unsigned int lane_mask = 0x0;
2508
2509 if (WARN_ON(dp_reg & DP_PORT_EN))
2510 return;
2511
2512 pps_lock(intel_dp);
2513
2514 if (IS_VALLEYVIEW(dev))
2515 vlv_init_panel_power_sequencer(intel_dp);
2516
2517 intel_dp_enable_port(intel_dp);
2518
2519 edp_panel_vdd_on(intel_dp);
2520 edp_panel_on(intel_dp);
2521 edp_panel_vdd_off(intel_dp, true);
2522
2523 pps_unlock(intel_dp);
2524
2525 if (IS_VALLEYVIEW(dev))
2526 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2527 lane_mask);
2528
2529 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2530 intel_dp_start_link_train(intel_dp);
2531 intel_dp_complete_link_train(intel_dp);
2532 intel_dp_stop_link_train(intel_dp);
2533
2534 if (crtc->config->has_audio) {
2535 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2536 pipe_name(crtc->pipe));
2537 intel_audio_codec_enable(encoder);
2538 }
2539 }
2540
2541 static void g4x_enable_dp(struct intel_encoder *encoder)
2542 {
2543 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2544
2545 intel_enable_dp(encoder);
2546 intel_edp_backlight_on(intel_dp);
2547 }
2548
2549 static void vlv_enable_dp(struct intel_encoder *encoder)
2550 {
2551 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2552
2553 intel_edp_backlight_on(intel_dp);
2554 intel_psr_enable(intel_dp);
2555 }
2556
2557 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2558 {
2559 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2560 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2561
2562 intel_dp_prepare(encoder);
2563
2564 /* Only ilk+ has port A */
2565 if (dport->port == PORT_A) {
2566 ironlake_set_pll_cpu_edp(intel_dp);
2567 ironlake_edp_pll_on(intel_dp);
2568 }
2569 }
2570
2571 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2572 {
2573 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2574 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2575 enum pipe pipe = intel_dp->pps_pipe;
2576 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2577
2578 edp_panel_vdd_off_sync(intel_dp);
2579
2580 /*
2581 * VLV seems to get confused when multiple power sequencers
2582 * have the same port selected (even if only one has power/vdd
2583 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2584 * CHV, on the other hand, doesn't seem to mind having the same port
2585 * selected in multiple power sequencers, but let's always clear the
2586 * port select when logically disconnecting a power sequencer
2587 * from a port.
2588 */
2589 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2590 pipe_name(pipe), port_name(intel_dig_port->port));
2591 I915_WRITE(pp_on_reg, 0);
2592 POSTING_READ(pp_on_reg);
2593
2594 intel_dp->pps_pipe = INVALID_PIPE;
2595 }
2596
2597 static void vlv_steal_power_sequencer(struct drm_device *dev,
2598 enum pipe pipe)
2599 {
2600 struct drm_i915_private *dev_priv = dev->dev_private;
2601 struct intel_encoder *encoder;
2602
2603 lockdep_assert_held(&dev_priv->pps_mutex);
2604
2605 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2606 return;
2607
2608 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2609 base.head) {
2610 struct intel_dp *intel_dp;
2611 enum port port;
2612
2613 if (encoder->type != INTEL_OUTPUT_EDP)
2614 continue;
2615
2616 intel_dp = enc_to_intel_dp(&encoder->base);
2617 port = dp_to_dig_port(intel_dp)->port;
2618
2619 if (intel_dp->pps_pipe != pipe)
2620 continue;
2621
2622 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2623 pipe_name(pipe), port_name(port));
2624
2625 WARN(encoder->connectors_active,
2626 "stealing pipe %c power sequencer from active eDP port %c\n",
2627 pipe_name(pipe), port_name(port));
2628
2629 /* make sure vdd is off before we steal it */
2630 vlv_detach_power_sequencer(intel_dp);
2631 }
2632 }
2633
2634 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2635 {
2636 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2637 struct intel_encoder *encoder = &intel_dig_port->base;
2638 struct drm_device *dev = encoder->base.dev;
2639 struct drm_i915_private *dev_priv = dev->dev_private;
2640 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2641
2642 lockdep_assert_held(&dev_priv->pps_mutex);
2643
2644 if (!is_edp(intel_dp))
2645 return;
2646
2647 if (intel_dp->pps_pipe == crtc->pipe)
2648 return;
2649
2650 /*
2651 * If another power sequencer was being used on this
2652 * port previously make sure to turn off vdd there while
2653 * we still have control of it.
2654 */
2655 if (intel_dp->pps_pipe != INVALID_PIPE)
2656 vlv_detach_power_sequencer(intel_dp);
2657
2658 /*
2659 * We may be stealing the power
2660 * sequencer from another port.
2661 */
2662 vlv_steal_power_sequencer(dev, crtc->pipe);
2663
2664 /* now it's all ours */
2665 intel_dp->pps_pipe = crtc->pipe;
2666
2667 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2668 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2669
2670 /* init power sequencer on this pipe and port */
2671 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2672 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2673 }
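/*
 * To summarize the handoff above: drop whatever sequencer this port was
 * using, steal the target pipe's sequencer from any eDP port currently
 * holding it, claim it for this port, and finally reprogram the power
 * sequencer registers for the new pipe/port pairing.
 */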
2674
2675 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2676 {
2677 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2678 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2679 struct drm_device *dev = encoder->base.dev;
2680 struct drm_i915_private *dev_priv = dev->dev_private;
2681 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2682 enum dpio_channel port = vlv_dport_to_channel(dport);
2683 int pipe = intel_crtc->pipe;
2684 u32 val;
2685
2686 mutex_lock(&dev_priv->sb_lock);
2687
2688 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2689 val = 0;
2690 if (pipe)
2691 val |= (1<<21);
2692 else
2693 val &= ~(1<<21);
2694 val |= 0x001000c4;
2695 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2696 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2697 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2698
2699 mutex_unlock(&dev_priv->sb_lock);
2700
2701 intel_enable_dp(encoder);
2702 }
2703
2704 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2705 {
2706 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2707 struct drm_device *dev = encoder->base.dev;
2708 struct drm_i915_private *dev_priv = dev->dev_private;
2709 struct intel_crtc *intel_crtc =
2710 to_intel_crtc(encoder->base.crtc);
2711 enum dpio_channel port = vlv_dport_to_channel(dport);
2712 int pipe = intel_crtc->pipe;
2713
2714 intel_dp_prepare(encoder);
2715
2716 /* Program Tx lane resets to default */
2717 mutex_lock(&dev_priv->sb_lock);
2718 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2719 DPIO_PCS_TX_LANE2_RESET |
2720 DPIO_PCS_TX_LANE1_RESET);
2721 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2722 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2723 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2724 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2725 DPIO_PCS_CLK_SOFT_RESET);
2726
2727 /* Fix up inter-pair skew failure */
2728 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2729 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2730 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2731 mutex_unlock(&dev_priv->sb_lock);
2732 }
2733
2734 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2735 {
2736 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2737 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2738 struct drm_device *dev = encoder->base.dev;
2739 struct drm_i915_private *dev_priv = dev->dev_private;
2740 struct intel_crtc *intel_crtc =
2741 to_intel_crtc(encoder->base.crtc);
2742 enum dpio_channel ch = vlv_dport_to_channel(dport);
2743 int pipe = intel_crtc->pipe;
2744 int data, i, stagger;
2745 u32 val;
2746
2747 mutex_lock(&dev_priv->sb_lock);
2748
2749 /* allow hardware to manage TX FIFO reset source */
2750 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2751 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2752 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2753
2754 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2755 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2756 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2757
2758 /* Deassert soft data lane reset */
2759 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2760 val |= CHV_PCS_REQ_SOFTRESET_EN;
2761 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2762
2763 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2764 val |= CHV_PCS_REQ_SOFTRESET_EN;
2765 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2766
2767 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2768 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2769 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2770
2771 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2772 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2773 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2774
2775 /* Program Tx lane latency optimal setting */
2776 for (i = 0; i < 4; i++) {
2777 /* Set the upar bit */
2778 data = (i == 1) ? 0x0 : 0x1;
2779 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2780 data << DPIO_UPAR_SHIFT);
2781 }
2782
2783 /* Data lane stagger programming */
2784 if (intel_crtc->config->port_clock > 270000)
2785 stagger = 0x18;
2786 else if (intel_crtc->config->port_clock > 135000)
2787 stagger = 0xd;
2788 else if (intel_crtc->config->port_clock > 67500)
2789 stagger = 0x7;
2790 else if (intel_crtc->config->port_clock > 33750)
2791 stagger = 0x4;
2792 else
2793 stagger = 0x2;
2794
2795 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2796 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2797 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2798
2799 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2800 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2801 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2802
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2804 DPIO_LANESTAGGER_STRAP(stagger) |
2805 DPIO_LANESTAGGER_STRAP_OVRD |
2806 DPIO_TX1_STAGGER_MASK(0x1f) |
2807 DPIO_TX1_STAGGER_MULT(6) |
2808 DPIO_TX2_STAGGER_MULT(0));
2809
2810 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2811 DPIO_LANESTAGGER_STRAP(stagger) |
2812 DPIO_LANESTAGGER_STRAP_OVRD |
2813 DPIO_TX1_STAGGER_MASK(0x1f) |
2814 DPIO_TX1_STAGGER_MULT(7) |
2815 DPIO_TX2_STAGGER_MULT(5));
2816
2817 mutex_unlock(&dev_priv->sb_lock);
2818
2819 intel_enable_dp(encoder);
2820 }
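/*
 * A note on the stagger table above: the thresholds are successive
 * halvings of 270000 (the HBR symbol clock in kHz), so the programmed
 * lane stagger roughly tracks the link symbol rate.
 */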
2821
2822 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2823 {
2824 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2825 struct drm_device *dev = encoder->base.dev;
2826 struct drm_i915_private *dev_priv = dev->dev_private;
2827 struct intel_crtc *intel_crtc =
2828 to_intel_crtc(encoder->base.crtc);
2829 enum dpio_channel ch = vlv_dport_to_channel(dport);
2830 enum pipe pipe = intel_crtc->pipe;
2831 u32 val;
2832
2833 intel_dp_prepare(encoder);
2834
2835 mutex_lock(&dev_priv->sb_lock);
2836
2837 /* program left/right clock distribution */
2838 if (pipe != PIPE_B) {
2839 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2840 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2841 if (ch == DPIO_CH0)
2842 val |= CHV_BUFLEFTENA1_FORCE;
2843 if (ch == DPIO_CH1)
2844 val |= CHV_BUFRIGHTENA1_FORCE;
2845 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2846 } else {
2847 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2848 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2849 if (ch == DPIO_CH0)
2850 val |= CHV_BUFLEFTENA2_FORCE;
2851 if (ch == DPIO_CH1)
2852 val |= CHV_BUFRIGHTENA2_FORCE;
2853 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2854 }
2855
2856 /* program clock channel usage */
2857 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2858 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2859 if (pipe != PIPE_B)
2860 val &= ~CHV_PCS_USEDCLKCHANNEL;
2861 else
2862 val |= CHV_PCS_USEDCLKCHANNEL;
2863 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2864
2865 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2866 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2867 if (pipe != PIPE_B)
2868 val &= ~CHV_PCS_USEDCLKCHANNEL;
2869 else
2870 val |= CHV_PCS_USEDCLKCHANNEL;
2871 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2872
2873 /*
2874 * This is a bit weird since generally CL
2875 * matches the pipe, but here we need to
2876 * pick the CL based on the port.
2877 */
2878 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2879 if (pipe != PIPE_B)
2880 val &= ~CHV_CMN_USEDCLKCHANNEL;
2881 else
2882 val |= CHV_CMN_USEDCLKCHANNEL;
2883 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2884
2885 mutex_unlock(&dev_priv->sb_lock);
2886 }
2887
2888 /*
2889 * Native read with retry for link status and receiver capability reads for
2890 * cases where the sink may still be asleep.
2891 *
2892 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2893 * supposed to retry 3 times per the spec.
2894 */
2895 static ssize_t
2896 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2897 void *buffer, size_t size)
2898 {
2899 ssize_t ret;
2900 int i;
2901
2902 /*
2903 * Sometimes we just get the same incorrect byte repeated
2904 * over the entire buffer. Doing just one throw-away read
2905 * initially seems to "solve" it.
2906 */
2907 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2908
2909 for (i = 0; i < 3; i++) {
2910 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2911 if (ret == size)
2912 return ret;
2913 msleep(1);
2914 }
2915
2916 return ret;
2917 }
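/*
 * Typical use, in the spirit of intel_dp_get_link_status() below —
 * treat anything short of a full-length read as failure (a sketch,
 * not actual driver code):
 *
 *	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
 *
 *	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV, dpcd,
 *				    sizeof(dpcd)) != sizeof(dpcd))
 *		return false; /* aux read failed or was short */
 */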
2918
2919 /*
2920 * Fetch AUX CH registers 0x202 - 0x207 which contain
2921 * link status information
2922 */
2923 static bool
2924 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2925 {
2926 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2927 DP_LANE0_1_STATUS,
2928 link_status,
2929 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2930 }
2931
2932 /* These are source-specific values. */
2933 static uint8_t
2934 intel_dp_voltage_max(struct intel_dp *intel_dp)
2935 {
2936 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2937 struct drm_i915_private *dev_priv = dev->dev_private;
2938 enum port port = dp_to_dig_port(intel_dp)->port;
2939
2940 if (IS_BROXTON(dev))
2941 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2942 else if (INTEL_INFO(dev)->gen >= 9) {
2943 if (dev_priv->edp_low_vswing && port == PORT_A)
2944 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2945 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2946 } else if (IS_VALLEYVIEW(dev))
2947 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2948 else if (IS_GEN7(dev) && port == PORT_A)
2949 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2950 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2951 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2952 else
2953 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2954 }
2955
2956 static uint8_t
2957 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2958 {
2959 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2960 enum port port = dp_to_dig_port(intel_dp)->port;
2961
2962 if (INTEL_INFO(dev)->gen >= 9) {
2963 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2964 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2965 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2967 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2968 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2969 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2971 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2972 default:
2973 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2974 }
2975 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2976 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2978 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2980 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2982 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2984 default:
2985 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2986 }
2987 } else if (IS_VALLEYVIEW(dev)) {
2988 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2989 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2990 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2992 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2993 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2994 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2995 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2996 default:
2997 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2998 }
2999 } else if (IS_GEN7(dev) && port == PORT_A) {
3000 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3001 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3002 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3005 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3006 default:
3007 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3008 }
3009 } else {
3010 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3012 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3013 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3014 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3015 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3016 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3017 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3018 default:
3019 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3020 }
3021 }
3022 }
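/*
 * Note the shape shared by all the tables above: the higher the
 * requested voltage swing, the lower the allowed pre-emphasis. DP caps
 * the combined transmit amplitude, so swing and pre-emphasis trade off
 * against each other.
 */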
3023
3024 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3025 {
3026 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3027 struct drm_i915_private *dev_priv = dev->dev_private;
3028 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3029 struct intel_crtc *intel_crtc =
3030 to_intel_crtc(dport->base.base.crtc);
3031 unsigned long demph_reg_value, preemph_reg_value,
3032 uniqtranscale_reg_value;
3033 uint8_t train_set = intel_dp->train_set[0];
3034 enum dpio_channel port = vlv_dport_to_channel(dport);
3035 int pipe = intel_crtc->pipe;
3036
3037 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3038 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3039 preemph_reg_value = 0x0004000;
3040 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3042 demph_reg_value = 0x2B405555;
3043 uniqtranscale_reg_value = 0x552AB83A;
3044 break;
3045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3046 demph_reg_value = 0x2B404040;
3047 uniqtranscale_reg_value = 0x5548B83A;
3048 break;
3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3050 demph_reg_value = 0x2B245555;
3051 uniqtranscale_reg_value = 0x5560B83A;
3052 break;
3053 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3054 demph_reg_value = 0x2B405555;
3055 uniqtranscale_reg_value = 0x5598DA3A;
3056 break;
3057 default:
3058 return 0;
3059 }
3060 break;
3061 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3062 preemph_reg_value = 0x0002000;
3063 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3065 demph_reg_value = 0x2B404040;
3066 uniqtranscale_reg_value = 0x5552B83A;
3067 break;
3068 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3069 demph_reg_value = 0x2B404848;
3070 uniqtranscale_reg_value = 0x5580B83A;
3071 break;
3072 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3073 demph_reg_value = 0x2B404040;
3074 uniqtranscale_reg_value = 0x55ADDA3A;
3075 break;
3076 default:
3077 return 0;
3078 }
3079 break;
3080 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3081 preemph_reg_value = 0x0000000;
3082 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3084 demph_reg_value = 0x2B305555;
3085 uniqtranscale_reg_value = 0x5570B83A;
3086 break;
3087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3088 demph_reg_value = 0x2B2B4040;
3089 uniqtranscale_reg_value = 0x55ADDA3A;
3090 break;
3091 default:
3092 return 0;
3093 }
3094 break;
3095 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3096 preemph_reg_value = 0x0006000;
3097 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3098 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3099 demph_reg_value = 0x1B405555;
3100 uniqtranscale_reg_value = 0x55ADDA3A;
3101 break;
3102 default:
3103 return 0;
3104 }
3105 break;
3106 default:
3107 return 0;
3108 }
3109
3110 mutex_lock(&dev_priv->sb_lock);
3111 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3112 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3113 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3114 uniqtranscale_reg_value);
3115 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3116 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3117 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3118 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3119 mutex_unlock(&dev_priv->sb_lock);
3120
3121 return 0;
3122 }
3123
3124 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3125 {
3126 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3127 struct drm_i915_private *dev_priv = dev->dev_private;
3128 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3129 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3130 u32 deemph_reg_value, margin_reg_value, val;
3131 uint8_t train_set = intel_dp->train_set[0];
3132 enum dpio_channel ch = vlv_dport_to_channel(dport);
3133 enum pipe pipe = intel_crtc->pipe;
3134 int i;
3135
3136 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3137 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3138 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3140 deemph_reg_value = 128;
3141 margin_reg_value = 52;
3142 break;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3144 deemph_reg_value = 128;
3145 margin_reg_value = 77;
3146 break;
3147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3148 deemph_reg_value = 128;
3149 margin_reg_value = 102;
3150 break;
3151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3152 deemph_reg_value = 128;
3153 margin_reg_value = 154;
3154 /* FIXME extra to set for 1200 */
3155 break;
3156 default:
3157 return 0;
3158 }
3159 break;
3160 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3161 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3162 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3163 deemph_reg_value = 85;
3164 margin_reg_value = 78;
3165 break;
3166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3167 deemph_reg_value = 85;
3168 margin_reg_value = 116;
3169 break;
3170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3171 deemph_reg_value = 85;
3172 margin_reg_value = 154;
3173 break;
3174 default:
3175 return 0;
3176 }
3177 break;
3178 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3179 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3180 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3181 deemph_reg_value = 64;
3182 margin_reg_value = 104;
3183 break;
3184 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3185 deemph_reg_value = 64;
3186 margin_reg_value = 154;
3187 break;
3188 default:
3189 return 0;
3190 }
3191 break;
3192 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3193 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3195 deemph_reg_value = 43;
3196 margin_reg_value = 154;
3197 break;
3198 default:
3199 return 0;
3200 }
3201 break;
3202 default:
3203 return 0;
3204 }
3205
3206 mutex_lock(&dev_priv->sb_lock);
3207
3208 /* Clear calc init */
3209 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3210 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3211 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3212 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3213 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3214
3215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3216 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3217 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3218 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3219 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3220
3221 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3222 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3223 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3224 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3225
3226 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3227 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3228 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3229 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3230
3231 /* Program swing deemph */
3232 for (i = 0; i < 4; i++) {
3233 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3234 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3235 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3236 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3237 }
3238
3239 /* Program swing margin */
3240 for (i = 0; i < 4; i++) {
3241 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3242 val &= ~DPIO_SWING_MARGIN000_MASK;
3243 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3244 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3245 }
3246
3247 /* Disable unique transition scale */
3248 for (i = 0; i < 4; i++) {
3249 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3250 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3251 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3252 }
3253
3254 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3255 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3256 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3257 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3258
3259 /*
3260 * The document says to set bit 27 for ch0 and bit 26 for ch1,
3261 * which might be a typo in the doc.
3262 * For now, for this unique transition scale selection, set bit
3263 * 27 for both ch0 and ch1.
3264 */
3265 for (i = 0; i < 4; i++) {
3266 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3267 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3268 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3269 }
3270
3271 for (i = 0; i < 4; i++) {
3272 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3273 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3274 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3275 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3276 }
3277 }
3278
3279 /* Start swing calculation */
3280 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3281 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3282 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3283
3284 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3285 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3286 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3287
3288 /* LRC Bypass */
3289 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3290 val |= DPIO_LRC_BYPASS;
3291 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3292
3293 mutex_unlock(&dev_priv->sb_lock);
3294
3295 return 0;
3296 }
3297
3298 static void
3299 intel_get_adjust_train(struct intel_dp *intel_dp,
3300 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3301 {
3302 uint8_t v = 0;
3303 uint8_t p = 0;
3304 int lane;
3305 uint8_t voltage_max;
3306 uint8_t preemph_max;
3307
3308 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3309 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3310 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3311
3312 if (this_v > v)
3313 v = this_v;
3314 if (this_p > p)
3315 p = this_p;
3316 }
3317
3318 voltage_max = intel_dp_voltage_max(intel_dp);
3319 if (v >= voltage_max)
3320 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3321
3322 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3323 if (p >= preemph_max)
3324 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3325
3326 for (lane = 0; lane < 4; lane++)
3327 intel_dp->train_set[lane] = v | p;
3328 }
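/*
 * Worked example of the logic above: if lane 0 requests (vswing 1,
 * pre-emph 0) and lane 1 requests (vswing 0, pre-emph 1), the loop
 * keeps the per-link maxima v = 1, p = 1. Each maximum is then clamped
 * to the source limit (gaining the MAX_*_REACHED flag when it hits it),
 * and the same v | p byte is stored for every lane, since this source
 * drives all lanes with one setting.
 */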
3329
3330 static uint32_t
3331 gen4_signal_levels(uint8_t train_set)
3332 {
3333 uint32_t signal_levels = 0;
3334
3335 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3337 default:
3338 signal_levels |= DP_VOLTAGE_0_4;
3339 break;
3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3341 signal_levels |= DP_VOLTAGE_0_6;
3342 break;
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3344 signal_levels |= DP_VOLTAGE_0_8;
3345 break;
3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3347 signal_levels |= DP_VOLTAGE_1_2;
3348 break;
3349 }
3350 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3351 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3352 default:
3353 signal_levels |= DP_PRE_EMPHASIS_0;
3354 break;
3355 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3356 signal_levels |= DP_PRE_EMPHASIS_3_5;
3357 break;
3358 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3359 signal_levels |= DP_PRE_EMPHASIS_6;
3360 break;
3361 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3362 signal_levels |= DP_PRE_EMPHASIS_9_5;
3363 break;
3364 }
3365 return signal_levels;
3366 }
3367
3368 /* Gen6's DP voltage swing and pre-emphasis control */
3369 static uint32_t
3370 gen6_edp_signal_levels(uint8_t train_set)
3371 {
3372 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3373 DP_TRAIN_PRE_EMPHASIS_MASK);
3374 switch (signal_levels) {
3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3377 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3379 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3382 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3385 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3388 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3389 default:
3390 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3391 "0x%x\n", signal_levels);
3392 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3393 }
3394 }
3395
3396 /* Gen7's DP voltage swing and pre-emphasis control */
3397 static uint32_t
3398 gen7_edp_signal_levels(uint8_t train_set)
3399 {
3400 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3401 DP_TRAIN_PRE_EMPHASIS_MASK);
3402 switch (signal_levels) {
3403 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3404 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3406 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3407 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3408 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3409
3410 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3411 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3412 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3413 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3414
3415 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3416 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3417 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3418 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3419
3420 default:
3421 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3422 "0x%x\n", signal_levels);
3423 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3424 }
3425 }
3426
3427 /* Properly updates "DP" with the correct signal levels. */
3428 static void
3429 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3430 {
3431 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3432 enum port port = intel_dig_port->port;
3433 struct drm_device *dev = intel_dig_port->base.base.dev;
3434 uint32_t signal_levels, mask = 0;
3435 uint8_t train_set = intel_dp->train_set[0];
3436
3437 if (HAS_DDI(dev)) {
3438 signal_levels = ddi_signal_levels(intel_dp);
3439
3440 if (IS_BROXTON(dev))
3441 signal_levels = 0;
3442 else
3443 mask = DDI_BUF_EMP_MASK;
3444 } else if (IS_CHERRYVIEW(dev)) {
3445 signal_levels = chv_signal_levels(intel_dp);
3446 } else if (IS_VALLEYVIEW(dev)) {
3447 signal_levels = vlv_signal_levels(intel_dp);
3448 } else if (IS_GEN7(dev) && port == PORT_A) {
3449 signal_levels = gen7_edp_signal_levels(train_set);
3450 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3451 } else if (IS_GEN6(dev) && port == PORT_A) {
3452 signal_levels = gen6_edp_signal_levels(train_set);
3453 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3454 } else {
3455 signal_levels = gen4_signal_levels(train_set);
3456 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3457 }
3458
3459 if (mask)
3460 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3461
3462 DRM_DEBUG_KMS("Using vswing level %d\n",
3463 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3464 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3465 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3466 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3467
3468 *DP = (*DP & ~mask) | signal_levels;
3469 }
3470
3471 static bool
3472 intel_dp_set_link_train(struct intel_dp *intel_dp,
3473 uint32_t *DP,
3474 uint8_t dp_train_pat)
3475 {
3476 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3477 struct drm_device *dev = intel_dig_port->base.base.dev;
3478 struct drm_i915_private *dev_priv = dev->dev_private;
3479 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3480 int ret, len;
3481
3482 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3483
3484 I915_WRITE(intel_dp->output_reg, *DP);
3485 POSTING_READ(intel_dp->output_reg);
3486
3487 buf[0] = dp_train_pat;
3488 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3489 DP_TRAINING_PATTERN_DISABLE) {
3490 /* don't write DP_TRAINING_LANEx_SET on disable */
3491 len = 1;
3492 } else {
3493 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3494 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3495 len = intel_dp->lane_count + 1;
3496 }
3497
3498 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3499 buf, len);
3500
3501 return ret == len;
3502 }
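/*
 * The single AUX write above relies on the DPCD layout:
 * DP_TRAINING_PATTERN_SET (0x102) is immediately followed by
 * DP_TRAINING_LANE0_SET..DP_TRAINING_LANE3_SET (0x103-0x106), so the
 * pattern and the per-lane drive levels go out in one contiguous burst.
 */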
3503
3504 static bool
3505 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3506 uint8_t dp_train_pat)
3507 {
3508 if (!intel_dp->train_set_valid)
3509 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3510 intel_dp_set_signal_levels(intel_dp, DP);
3511 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3512 }
3513
3514 static bool
3515 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3516 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3517 {
3518 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3519 struct drm_device *dev = intel_dig_port->base.base.dev;
3520 struct drm_i915_private *dev_priv = dev->dev_private;
3521 int ret;
3522
3523 intel_get_adjust_train(intel_dp, link_status);
3524 intel_dp_set_signal_levels(intel_dp, DP);
3525
3526 I915_WRITE(intel_dp->output_reg, *DP);
3527 POSTING_READ(intel_dp->output_reg);
3528
3529 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3530 intel_dp->train_set, intel_dp->lane_count);
3531
3532 return ret == intel_dp->lane_count;
3533 }
3534
3535 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3536 {
3537 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3538 struct drm_device *dev = intel_dig_port->base.base.dev;
3539 struct drm_i915_private *dev_priv = dev->dev_private;
3540 enum port port = intel_dig_port->port;
3541 uint32_t val;
3542
3543 if (!HAS_DDI(dev))
3544 return;
3545
3546 val = I915_READ(DP_TP_CTL(port));
3547 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3548 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3549 I915_WRITE(DP_TP_CTL(port), val);
3550
3551 /*
3552 * On PORT_A we can have only eDP in SST mode. There the only reason
3553 * we need to set idle transmission mode is to work around a HW issue
3554 * where we enable the pipe while not in idle link-training mode.
3555 * In this case there is a requirement to wait for a minimum number of
3556 * idle patterns to be sent.
3557 */
3558 if (port == PORT_A)
3559 return;
3560
3561 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3562 1))
3563 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3564 }
3565
3566 /* Enable corresponding port and start training pattern 1 */
3567 void
3568 intel_dp_start_link_train(struct intel_dp *intel_dp)
3569 {
3570 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3571 struct drm_device *dev = encoder->dev;
3572 int i;
3573 uint8_t voltage;
3574 int voltage_tries, loop_tries;
3575 uint32_t DP = intel_dp->DP;
3576 uint8_t link_config[2];
3577
3578 if (HAS_DDI(dev))
3579 intel_ddi_prepare_link_retrain(encoder);
3580
3581 /* Write the link configuration data */
3582 link_config[0] = intel_dp->link_bw;
3583 link_config[1] = intel_dp->lane_count;
3584 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3585 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3586 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3587 if (intel_dp->num_sink_rates)
3588 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3589 &intel_dp->rate_select, 1);
3590
3591 link_config[0] = 0;
3592 link_config[1] = DP_SET_ANSI_8B10B;
3593 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3594
3595 DP |= DP_PORT_EN;
3596
3597 /* clock recovery */
3598 if (!intel_dp_reset_link_train(intel_dp, &DP,
3599 DP_TRAINING_PATTERN_1 |
3600 DP_LINK_SCRAMBLING_DISABLE)) {
3601 DRM_ERROR("failed to enable link training\n");
3602 return;
3603 }
3604
3605 voltage = 0xff;
3606 voltage_tries = 0;
3607 loop_tries = 0;
3608 for (;;) {
3609 uint8_t link_status[DP_LINK_STATUS_SIZE];
3610
3611 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3612 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3613 DRM_ERROR("failed to get link status\n");
3614 break;
3615 }
3616
3617 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3618 DRM_DEBUG_KMS("clock recovery OK\n");
3619 break;
3620 }
3621
3622 /*
3623 * if we used previously trained voltage and pre-emphasis values
3624 * and we don't get clock recovery, reset link training values
3625 */
3626 if (intel_dp->train_set_valid) {
3627 DRM_DEBUG_KMS("clock recovery not ok, reset");
3628 /* clear the flag as we are not reusing train set */
3629 intel_dp->train_set_valid = false;
3630 if (!intel_dp_reset_link_train(intel_dp, &DP,
3631 DP_TRAINING_PATTERN_1 |
3632 DP_LINK_SCRAMBLING_DISABLE)) {
3633 DRM_ERROR("failed to enable link training\n");
3634 return;
3635 }
3636 continue;
3637 }
3638
3639 /* Check to see if we've tried the max voltage */
3640 for (i = 0; i < intel_dp->lane_count; i++)
3641 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3642 break;
3643 if (i == intel_dp->lane_count) {
3644 ++loop_tries;
3645 if (loop_tries == 5) {
3646 DRM_ERROR("too many full retries, give up\n");
3647 break;
3648 }
3649 intel_dp_reset_link_train(intel_dp, &DP,
3650 DP_TRAINING_PATTERN_1 |
3651 DP_LINK_SCRAMBLING_DISABLE);
3652 voltage_tries = 0;
3653 continue;
3654 }
3655
3656 /* Check to see if we've tried the same voltage 5 times */
3657 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3658 ++voltage_tries;
3659 if (voltage_tries == 5) {
3660 DRM_ERROR("too many voltage retries, give up\n");
3661 break;
3662 }
3663 } else
3664 voltage_tries = 0;
3665 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3666
3667 /* Update training set as requested by target */
3668 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3669 DRM_ERROR("failed to update link training\n");
3670 break;
3671 }
3672 }
3673
3674 intel_dp->DP = DP;
3675 }
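/*
 * Standalone model (illustrative only, sketch_* names assumed) of the
 * retry policy in the clock-recovery loop above: a full reset with a
 * cleared train set is allowed 5 times once every lane reports max
 * swing, and the same voltage may be requested at most 5 times in a row
 * before training gives up.
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_cr_state {
	int voltage_tries, loop_tries;
	uint8_t last_voltage;
};

static bool sketch_cr_give_up(struct sketch_cr_state *s,
			      bool all_lanes_at_max_swing,
			      uint8_t requested_voltage)
{
	if (all_lanes_at_max_swing) {
		/* full reset of the train set, at most 5 times */
		s->voltage_tries = 0;
		return ++s->loop_tries == 5;
	}
	if (requested_voltage == s->last_voltage) {
		if (++s->voltage_tries == 5)
			return true;	/* stuck on one voltage, give up */
	} else {
		s->voltage_tries = 0;
	}
	s->last_voltage = requested_voltage;
	return false;
}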
3676
3677 void
3678 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3679 {
3680 bool channel_eq = false;
3681 int tries, cr_tries;
3682 uint32_t DP = intel_dp->DP;
3683 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3684
3685 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3686 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3687 training_pattern = DP_TRAINING_PATTERN_3;
3688
3689 /* channel equalization */
3690 if (!intel_dp_set_link_train(intel_dp, &DP,
3691 training_pattern |
3692 DP_LINK_SCRAMBLING_DISABLE)) {
3693 DRM_ERROR("failed to start channel equalization\n");
3694 return;
3695 }
3696
3697 tries = 0;
3698 cr_tries = 0;
3699 channel_eq = false;
3700 for (;;) {
3701 uint8_t link_status[DP_LINK_STATUS_SIZE];
3702
3703 if (cr_tries > 5) {
3704 DRM_ERROR("failed to train DP, aborting\n");
3705 break;
3706 }
3707
3708 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3709 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3710 DRM_ERROR("failed to get link status\n");
3711 break;
3712 }
3713
3714 /* Make sure clock is still ok */
3715 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3716 intel_dp->train_set_valid = false;
3717 intel_dp_start_link_train(intel_dp);
3718 intel_dp_set_link_train(intel_dp, &DP,
3719 training_pattern |
3720 DP_LINK_SCRAMBLING_DISABLE);
3721 cr_tries++;
3722 continue;
3723 }
3724
3725 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3726 channel_eq = true;
3727 break;
3728 }
3729
3730 /* Try 5 times, then try clock recovery if that fails */
3731 if (tries > 5) {
3732 intel_dp->train_set_valid = false;
3733 intel_dp_start_link_train(intel_dp);
3734 intel_dp_set_link_train(intel_dp, &DP,
3735 training_pattern |
3736 DP_LINK_SCRAMBLING_DISABLE);
3737 tries = 0;
3738 cr_tries++;
3739 continue;
3740 }
3741
3742 /* Update training set as requested by target */
3743 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3744 DRM_ERROR("failed to update link training\n");
3745 break;
3746 }
3747 ++tries;
3748 }
3749
3750 intel_dp_set_idle_link_train(intel_dp);
3751
3752 intel_dp->DP = DP;
3753
3754 if (channel_eq) {
3755 intel_dp->train_set_valid = true;
3756 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3757 }
3758 }
3759
3760 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3761 {
3762 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3763 DP_TRAINING_PATTERN_DISABLE);
3764 }
3765
3766 static void
3767 intel_dp_link_down(struct intel_dp *intel_dp)
3768 {
3769 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3770 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3771 enum port port = intel_dig_port->port;
3772 struct drm_device *dev = intel_dig_port->base.base.dev;
3773 struct drm_i915_private *dev_priv = dev->dev_private;
3774 uint32_t DP = intel_dp->DP;
3775
3776 if (WARN_ON(HAS_DDI(dev)))
3777 return;
3778
3779 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3780 return;
3781
3782 DRM_DEBUG_KMS("\n");
3783
3784 if ((IS_GEN7(dev) && port == PORT_A) ||
3785 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3786 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3787 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3788 } else {
3789 if (IS_CHERRYVIEW(dev))
3790 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3791 else
3792 DP &= ~DP_LINK_TRAIN_MASK;
3793 DP |= DP_LINK_TRAIN_PAT_IDLE;
3794 }
3795 I915_WRITE(intel_dp->output_reg, DP);
3796 POSTING_READ(intel_dp->output_reg);
3797
3798 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3799 I915_WRITE(intel_dp->output_reg, DP);
3800 POSTING_READ(intel_dp->output_reg);
3801
3802 /*
3803 * HW workaround for IBX, we need to move the port
3804 * to transcoder A after disabling it to allow the
3805 * matching HDMI port to be enabled on transcoder A.
3806 */
3807 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3808 /* always enable with pattern 1 (as per spec) */
3809 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3810 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3811 I915_WRITE(intel_dp->output_reg, DP);
3812 POSTING_READ(intel_dp->output_reg);
3813
3814 DP &= ~DP_PORT_EN;
3815 I915_WRITE(intel_dp->output_reg, DP);
3816 POSTING_READ(intel_dp->output_reg);
3817 }
3818
3819 msleep(intel_dp->panel_power_down_delay);
3820 }
3821
3822 static bool
3823 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3824 {
3825 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3826 struct drm_device *dev = dig_port->base.base.dev;
3827 struct drm_i915_private *dev_priv = dev->dev_private;
3828 uint8_t rev;
3829
3830 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3831 sizeof(intel_dp->dpcd)) < 0)
3832 return false; /* aux transfer failed */
3833
3834 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3835
3836 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3837 return false; /* DPCD not present */
3838
3839 /* Check if the panel supports PSR */
3840 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3841 if (is_edp(intel_dp)) {
3842 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3843 intel_dp->psr_dpcd,
3844 sizeof(intel_dp->psr_dpcd));
3845 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3846 dev_priv->psr.sink_support = true;
3847 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3848 }
3849
3850 if (INTEL_INFO(dev)->gen >= 9 &&
3851 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3852 uint8_t frame_sync_cap;
3853
3854 dev_priv->psr.sink_support = true;
3855 intel_dp_dpcd_read_wake(&intel_dp->aux,
3856 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3857 &frame_sync_cap, 1);
3858 dev_priv->psr.aux_frame_sync = frame_sync_cap != 0;
3859 /* PSR2 needs frame sync as well */
3860 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3861 DRM_DEBUG_KMS("PSR2 %s on sink",
3862 dev_priv->psr.psr2_support ? "supported" : "not supported");
3863 }
3864 }
3865
3866 /* Training Pattern 3 support, both source and sink */
3867 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3868 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3869 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3870 intel_dp->use_tps3 = true;
3871 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3872 } else
3873 intel_dp->use_tps3 = false;
3874
3875 /* Intermediate frequency support */
3876 if (is_edp(intel_dp) &&
3877 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3878 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3879 (rev >= 0x03)) { /* eDP v1.4 or higher */
3880 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3881 int i;
3882
3883 intel_dp_dpcd_read_wake(&intel_dp->aux,
3884 DP_SUPPORTED_LINK_RATES,
3885 sink_rates,
3886 sizeof(sink_rates));
3887
3888 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3889 int val = le16_to_cpu(sink_rates[i]);
3890
3891 if (val == 0)
3892 break;
3893
3894 /* Value read is in units of 200 kHz; drm clock is stored in deca-kHz */
3895 intel_dp->sink_rates[i] = (val * 200) / 10;
3896 }
3897 intel_dp->num_sink_rates = i;
3898 }
3899
3900 intel_dp_print_rates(intel_dp);
3901
3902 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3903 DP_DWN_STRM_PORT_PRESENT))
3904 return true; /* native DP sink */
3905
3906 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3907 return true; /* no per-port downstream info */
3908
3909 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3910 intel_dp->downstream_ports,
3911 DP_MAX_DOWNSTREAM_PORTS) < 0)
3912 return false; /* downstream port status fetch failed */
3913
3914 return true;
3915 }
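/*
 * Standalone sketch of the eDP 1.4 rate-table parse above: entries in
 * DP_SUPPORTED_LINK_RATES are little-endian u16s in units of 200 kHz,
 * the table is zero-terminated, and the driver keeps rates in deca-kHz
 * (10 kHz units), hence the * 200 / 10. The sketch_* names are
 * illustrative.
 */
#include <stdint.h>

#define SKETCH_MAX_RATES 8	/* mirrors DP_MAX_SUPPORTED_RATES */

static int sketch_parse_sink_rates(const uint8_t raw[2 * SKETCH_MAX_RATES],
				   int rates[SKETCH_MAX_RATES])
{
	int i;

	for (i = 0; i < SKETCH_MAX_RATES; i++) {
		uint16_t val = raw[2 * i] | (uint16_t)raw[2 * i + 1] << 8;

		if (val == 0)			/* zero terminates the table */
			break;
		rates[i] = val * 200 / 10;	/* 200 kHz -> deca-kHz */
	}
	return i;				/* number of valid rates */
}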
3916
3917 static void
3918 intel_dp_probe_oui(struct intel_dp *intel_dp)
3919 {
3920 u8 buf[3];
3921
3922 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3923 return;
3924
3925 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3926 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3927 buf[0], buf[1], buf[2]);
3928
3929 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3930 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3931 buf[0], buf[1], buf[2]);
3932 }
3933
3934 static bool
3935 intel_dp_probe_mst(struct intel_dp *intel_dp)
3936 {
3937 u8 buf[1];
3938
3939 if (!intel_dp->can_mst)
3940 return false;
3941
3942 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3943 return false;
3944
3945 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
3946 if (buf[0] & DP_MST_CAP) {
3947 DRM_DEBUG_KMS("Sink is MST capable\n");
3948 intel_dp->is_mst = true;
3949 } else {
3950 DRM_DEBUG_KMS("Sink is not MST capable\n");
3951 intel_dp->is_mst = false;
3952 }
3953 }
3954
3955 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3956 return intel_dp->is_mst;
3957 }
3958
3959 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3960 {
3961 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3962 struct drm_device *dev = intel_dig_port->base.base.dev;
3963 struct intel_crtc *intel_crtc =
3964 to_intel_crtc(intel_dig_port->base.base.crtc);
3965 u8 buf;
3966 int test_crc_count;
3967 int attempts = 6;
3968 int ret = 0;
3969
3970 hsw_disable_ips(intel_crtc);
3971
3972 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
3973 ret = -EIO;
3974 goto out;
3975 }
3976
3977 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
3978 ret = -ENOTTY;
3979 goto out;
3980 }
3981
3982 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3983 ret = -EIO;
3984 goto out;
3985 }
3986
3987 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3988 buf | DP_TEST_SINK_START) < 0) {
3989 ret = -EIO;
3990 goto out;
3991 }
3992
3993 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
3994 ret = -EIO;
3995 goto out;
3996 }
3997
3998 test_crc_count = buf & DP_TEST_COUNT_MASK;
3999
4000 do {
4001 if (drm_dp_dpcd_readb(&intel_dp->aux,
4002 DP_TEST_SINK_MISC, &buf) < 0) {
4003 ret = -EIO;
4004 goto out;
4005 }
4006 intel_wait_for_vblank(dev, intel_crtc->pipe);
4007 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4008
4009 if (attempts == 0) {
4010 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4011 ret = -ETIMEDOUT;
4012 goto out;
4013 }
4014
4015 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4016 ret = -EIO;
4017 goto out;
4018 }
4019
4020 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4021 ret = -EIO;
4022 goto out;
4023 }
4024 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4025 buf & ~DP_TEST_SINK_START) < 0) {
4026 ret = -EIO;
4027 goto out;
4028 }
4029 out:
4030 hsw_enable_ips(intel_crtc);
4031 return ret;
4032 }
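/*
 * A hypothetical caller of the helper above (e.g. from a debugfs hook),
 * to show the expected calling convention: on success, crc[] holds six
 * bytes of sink-computed CRC, read as the R/Cr, G/Y and B/Cb pairs
 * starting at DP_TEST_CRC_R_CR. The sketch_* name is illustrative.
 */
static void sketch_dump_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC %02x%02x %02x%02x %02x%02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}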
4033
4034 static bool
4035 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4036 {
4037 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4038 DP_DEVICE_SERVICE_IRQ_VECTOR,
4039 sink_irq_vector, 1) == 1;
4040 }
4041
4042 static bool
4043 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4044 {
4045 int ret;
4046
4047 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4048 DP_SINK_COUNT_ESI,
4049 sink_irq_vector, 14);
4050 if (ret != 14)
4051 return false;
4052
4053 return true;
4054 }
4055
4056 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4057 {
4058 uint8_t test_result = DP_TEST_ACK;
4059 return test_result;
4060 }
4061
4062 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4063 {
4064 uint8_t test_result = DP_TEST_NAK;
4065 return test_result;
4066 }
4067
4068 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4069 {
4070 uint8_t test_result = DP_TEST_NAK;
4071 struct intel_connector *intel_connector = intel_dp->attached_connector;
4072 struct drm_connector *connector = &intel_connector->base;
4073
4074 if (intel_connector->detect_edid == NULL ||
4075 connector->edid_corrupt ||
4076 intel_dp->aux.i2c_defer_count > 6) {
4077 /* Check EDID read for NACKs, DEFERs and corruption
4078 * (DP CTS 1.2 Core r1.1)
4079 * 4.2.2.4 : Failed EDID read, I2C_NAK
4080 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4081 * 4.2.2.6 : EDID corruption detected
4082 * Use failsafe mode for all cases
4083 */
4084 if (intel_dp->aux.i2c_nack_count > 0 ||
4085 intel_dp->aux.i2c_defer_count > 0)
4086 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4087 intel_dp->aux.i2c_nack_count,
4088 intel_dp->aux.i2c_defer_count);
4089 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4090 } else {
4091 if (!drm_dp_dpcd_write(&intel_dp->aux,
4092 DP_TEST_EDID_CHECKSUM,
4093 &intel_connector->detect_edid->checksum,
4094 1))
4095 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4096
4097 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4098 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4099 }
4100
4101 /* Set test active flag here so userspace doesn't interrupt things */
4102 intel_dp->compliance_test_active = 1;
4103
4104 return test_result;
4105 }
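/*
 * Standalone sketch of the checksum semantics behind the EDID test
 * above: a valid 128-byte EDID block sums to zero mod 256, so byte 127
 * is the correction value, and that is what gets reported back to the
 * sink via DP_TEST_EDID_CHECKSUM. The sketch_* name is illustrative.
 */
#include <stdint.h>

static uint8_t sketch_edid_checksum(const uint8_t block[128])
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 127; i++)
		sum += block[i];
	return (uint8_t)(0x100 - sum);	/* equals block[127] if valid */
}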
4106
4107 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4108 {
4109 uint8_t test_result = DP_TEST_NAK;
4110 return test_result;
4111 }
4112
4113 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4114 {
4115 uint8_t response = DP_TEST_NAK;
4116 uint8_t rxdata = 0;
4117 int status = 0;
4118
4119 intel_dp->compliance_test_active = 0;
4120 intel_dp->compliance_test_type = 0;
4121 intel_dp->compliance_test_data = 0;
4122
4123 intel_dp->aux.i2c_nack_count = 0;
4124 intel_dp->aux.i2c_defer_count = 0;
4125
4126 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4127 if (status <= 0) {
4128 DRM_DEBUG_KMS("Could not read test request from sink\n");
4129 goto update_status;
4130 }
4131
4132 switch (rxdata) {
4133 case DP_TEST_LINK_TRAINING:
4134 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4135 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4136 response = intel_dp_autotest_link_training(intel_dp);
4137 break;
4138 case DP_TEST_LINK_VIDEO_PATTERN:
4139 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4140 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4141 response = intel_dp_autotest_video_pattern(intel_dp);
4142 break;
4143 case DP_TEST_LINK_EDID_READ:
4144 DRM_DEBUG_KMS("EDID test requested\n");
4145 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4146 response = intel_dp_autotest_edid(intel_dp);
4147 break;
4148 case DP_TEST_LINK_PHY_TEST_PATTERN:
4149 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4150 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4151 response = intel_dp_autotest_phy_pattern(intel_dp);
4152 break;
4153 default:
4154 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4155 break;
4156 }
4157
4158 update_status:
4159 status = drm_dp_dpcd_write(&intel_dp->aux,
4160 DP_TEST_RESPONSE,
4161 &response, 1);
4162 if (status <= 0)
4163 DRM_DEBUG_KMS("Could not write test response to sink\n");
4164 }
4165
4166 static int
4167 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4168 {
4169 bool bret;
4170
4171 if (intel_dp->is_mst) {
4172 u8 esi[16] = { 0 };
4173 int ret = 0;
4174 int retry;
4175 bool handled;
4176 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4177 go_again:
4178 if (bret) {
4179
4180 /* check link status - esi[10] is DP_LANE0_1_STATUS_ESI (0x200c) */
4181 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4182 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4183 intel_dp_start_link_train(intel_dp);
4184 intel_dp_complete_link_train(intel_dp);
4185 intel_dp_stop_link_train(intel_dp);
4186 }
4187
4188 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4189 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4190
4191 if (handled) {
4192 for (retry = 0; retry < 3; retry++) {
4193 int wret;
4194 wret = drm_dp_dpcd_write(&intel_dp->aux,
4195 DP_SINK_COUNT_ESI+1,
4196 &esi[1], 3);
4197 if (wret == 3) {
4198 break;
4199 }
4200 }
4201
4202 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4203 if (bret) {
4204 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4205 goto go_again;
4206 }
4207 } else
4208 ret = 0;
4209
4210 return ret;
4211 } else {
4212 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4213 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4214 intel_dp->is_mst = false;
4215 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4216 /* send a hotplug event */
4217 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4218 }
4219 }
4220 return -EINVAL;
4221 }
4222
4223 /*
4224 * According to DP spec
4225 * 5.1.2:
4226 * 1. Read DPCD
4227 * 2. Configure link according to Receiver Capabilities
4228 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4229 * 4. Check link status on receipt of hot-plug interrupt
4230 */
4231 static void
4232 intel_dp_check_link_status(struct intel_dp *intel_dp)
4233 {
4234 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4235 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4236 u8 sink_irq_vector;
4237 u8 link_status[DP_LINK_STATUS_SIZE];
4238
4239 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4240
4241 if (!intel_encoder->connectors_active)
4242 return;
4243
4244 if (WARN_ON(!intel_encoder->base.crtc))
4245 return;
4246
4247 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4248 return;
4249
4250 /* Try to read receiver status if the link appears to be up */
4251 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4252 return;
4253 }
4254
4255 /* Now read the DPCD to see if it's actually running */
4256 if (!intel_dp_get_dpcd(intel_dp)) {
4257 return;
4258 }
4259
4260 /* Try to read the source of the interrupt */
4261 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4262 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4263 /* Clear interrupt source */
4264 drm_dp_dpcd_writeb(&intel_dp->aux,
4265 DP_DEVICE_SERVICE_IRQ_VECTOR,
4266 sink_irq_vector);
4267
4268 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4269 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4270 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4271 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4272 }
4273
4274 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4275 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4276 intel_encoder->base.name);
4277 intel_dp_start_link_train(intel_dp);
4278 intel_dp_complete_link_train(intel_dp);
4279 intel_dp_stop_link_train(intel_dp);
4280 }
4281 }
4282
4283 /* XXX this is probably wrong for multiple downstream ports */
4284 static enum drm_connector_status
4285 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4286 {
4287 uint8_t *dpcd = intel_dp->dpcd;
4288 uint8_t type;
4289
4290 if (!intel_dp_get_dpcd(intel_dp))
4291 return connector_status_disconnected;
4292
4293 /* if there's no downstream port, we're done */
4294 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4295 return connector_status_connected;
4296
4297 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4298 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4299 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4300 uint8_t reg;
4301
4302 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4303 &reg, 1) < 0)
4304 return connector_status_unknown;
4305
4306 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4307 : connector_status_disconnected;
4308 }
4309
4310 /* If no HPD, poke DDC gently */
4311 if (drm_probe_ddc(&intel_dp->aux.ddc))
4312 return connector_status_connected;
4313
4314 /* Well we tried, say unknown for unreliable port types */
4315 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4316 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4317 if (type == DP_DS_PORT_TYPE_VGA ||
4318 type == DP_DS_PORT_TYPE_NON_EDID)
4319 return connector_status_unknown;
4320 } else {
4321 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4322 DP_DWN_STRM_PORT_TYPE_MASK;
4323 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4324 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4325 return connector_status_unknown;
4326 }
4327
4328 /* Anything else is out of spec, warn and ignore */
4329 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4330 return connector_status_disconnected;
4331 }
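/*
 * Sketch of the DP_GET_SINK_COUNT() decode used above, assuming the
 * DP 1.2 layout of DP_SINK_COUNT: bits 5:0 are the low count bits,
 * bit 6 is CP_READY, and bit 7 supplies bit 6 of the count. The
 * sketch_* name is illustrative.
 */
#include <stdint.h>

static int sketch_sink_count(uint8_t sink_count_reg)
{
	return ((sink_count_reg & 0x80) >> 1) | (sink_count_reg & 0x3f);
}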
4332
4333 static enum drm_connector_status
4334 edp_detect(struct intel_dp *intel_dp)
4335 {
4336 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4337 enum drm_connector_status status;
4338
4339 status = intel_panel_detect(dev);
4340 if (status == connector_status_unknown)
4341 status = connector_status_connected;
4342
4343 return status;
4344 }
4345
4346 static enum drm_connector_status
4347 ironlake_dp_detect(struct intel_dp *intel_dp)
4348 {
4349 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4350 struct drm_i915_private *dev_priv = dev->dev_private;
4351 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4352
4353 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4354 return connector_status_disconnected;
4355
4356 return intel_dp_detect_dpcd(intel_dp);
4357 }
4358
4359 static int g4x_digital_port_connected(struct drm_device *dev,
4360 struct intel_digital_port *intel_dig_port)
4361 {
4362 struct drm_i915_private *dev_priv = dev->dev_private;
4363 uint32_t bit;
4364
4365 if (IS_VALLEYVIEW(dev)) {
4366 switch (intel_dig_port->port) {
4367 case PORT_B:
4368 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4369 break;
4370 case PORT_C:
4371 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4372 break;
4373 case PORT_D:
4374 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4375 break;
4376 default:
4377 return -EINVAL;
4378 }
4379 } else {
4380 switch (intel_dig_port->port) {
4381 case PORT_B:
4382 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4383 break;
4384 case PORT_C:
4385 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4386 break;
4387 case PORT_D:
4388 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4389 break;
4390 default:
4391 return -EINVAL;
4392 }
4393 }
4394
4395 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4396 return 0;
4397 return 1;
4398 }
4399
4400 static enum drm_connector_status
4401 g4x_dp_detect(struct intel_dp *intel_dp)
4402 {
4403 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4404 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4405 int ret;
4406
4407 /* Can't disconnect eDP, but you can close the lid... */
4408 if (is_edp(intel_dp)) {
4409 enum drm_connector_status status;
4410
4411 status = intel_panel_detect(dev);
4412 if (status == connector_status_unknown)
4413 status = connector_status_connected;
4414 return status;
4415 }
4416
4417 ret = g4x_digital_port_connected(dev, intel_dig_port);
4418 if (ret == -EINVAL)
4419 return connector_status_unknown;
4420 else if (ret == 0)
4421 return connector_status_disconnected;
4422
4423 return intel_dp_detect_dpcd(intel_dp);
4424 }
4425
4426 static struct edid *
4427 intel_dp_get_edid(struct intel_dp *intel_dp)
4428 {
4429 struct intel_connector *intel_connector = intel_dp->attached_connector;
4430
4431 /* use cached edid if we have one */
4432 if (intel_connector->edid) {
4433 /* invalid edid */
4434 if (IS_ERR(intel_connector->edid))
4435 return NULL;
4436
4437 return drm_edid_duplicate(intel_connector->edid);
4438 } else
4439 return drm_get_edid(&intel_connector->base,
4440 &intel_dp->aux.ddc);
4441 }
4442
4443 static void
4444 intel_dp_set_edid(struct intel_dp *intel_dp)
4445 {
4446 struct intel_connector *intel_connector = intel_dp->attached_connector;
4447 struct edid *edid;
4448
4449 edid = intel_dp_get_edid(intel_dp);
4450 intel_connector->detect_edid = edid;
4451
4452 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4453 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4454 else
4455 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4456 }
4457
4458 static void
4459 intel_dp_unset_edid(struct intel_dp *intel_dp)
4460 {
4461 struct intel_connector *intel_connector = intel_dp->attached_connector;
4462
4463 kfree(intel_connector->detect_edid);
4464 intel_connector->detect_edid = NULL;
4465
4466 intel_dp->has_audio = false;
4467 }
4468
4469 static enum intel_display_power_domain
4470 intel_dp_power_get(struct intel_dp *dp)
4471 {
4472 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4473 enum intel_display_power_domain power_domain;
4474
4475 power_domain = intel_display_port_power_domain(encoder);
4476 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4477
4478 return power_domain;
4479 }
4480
4481 static void
4482 intel_dp_power_put(struct intel_dp *dp,
4483 enum intel_display_power_domain power_domain)
4484 {
4485 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4486 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4487 }
4488
4489 static enum drm_connector_status
4490 intel_dp_detect(struct drm_connector *connector, bool force)
4491 {
4492 struct intel_dp *intel_dp = intel_attached_dp(connector);
4493 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4494 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4495 struct drm_device *dev = connector->dev;
4496 enum drm_connector_status status;
4497 enum intel_display_power_domain power_domain;
4498 bool ret;
4499 u8 sink_irq_vector;
4500
4501 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4502 connector->base.id, connector->name);
4503 intel_dp_unset_edid(intel_dp);
4504
4505 if (intel_dp->is_mst) {
4506 /* MST devices are disconnected from a monitor POV */
4507 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4508 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4509 return connector_status_disconnected;
4510 }
4511
4512 power_domain = intel_dp_power_get(intel_dp);
4513
4514 /* Can't disconnect eDP, but you can close the lid... */
4515 if (is_edp(intel_dp))
4516 status = edp_detect(intel_dp);
4517 else if (HAS_PCH_SPLIT(dev))
4518 status = ironlake_dp_detect(intel_dp);
4519 else
4520 status = g4x_dp_detect(intel_dp);
4521 if (status != connector_status_connected)
4522 goto out;
4523
4524 intel_dp_probe_oui(intel_dp);
4525
4526 ret = intel_dp_probe_mst(intel_dp);
4527 if (ret) {
4528 /* if we are in MST mode then this connector won't appear
4529 * connected or have anything with EDID on it */
4530 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4531 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4532 status = connector_status_disconnected;
4533 goto out;
4534 }
4535
4536 intel_dp_set_edid(intel_dp);
4537
4538 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4539 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4540 status = connector_status_connected;
4541
4542 /* Try to read the source of the interrupt */
4543 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4544 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4545 /* Clear interrupt source */
4546 drm_dp_dpcd_writeb(&intel_dp->aux,
4547 DP_DEVICE_SERVICE_IRQ_VECTOR,
4548 sink_irq_vector);
4549
4550 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4551 intel_dp_handle_test_request(intel_dp);
4552 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4553 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4554 }
4555
4556 out:
4557 intel_dp_power_put(intel_dp, power_domain);
4558 return status;
4559 }
4560
4561 static void
4562 intel_dp_force(struct drm_connector *connector)
4563 {
4564 struct intel_dp *intel_dp = intel_attached_dp(connector);
4565 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4566 enum intel_display_power_domain power_domain;
4567
4568 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4569 connector->base.id, connector->name);
4570 intel_dp_unset_edid(intel_dp);
4571
4572 if (connector->status != connector_status_connected)
4573 return;
4574
4575 power_domain = intel_dp_power_get(intel_dp);
4576
4577 intel_dp_set_edid(intel_dp);
4578
4579 intel_dp_power_put(intel_dp, power_domain);
4580
4581 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4582 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4583 }
4584
4585 static int intel_dp_get_modes(struct drm_connector *connector)
4586 {
4587 struct intel_connector *intel_connector = to_intel_connector(connector);
4588 struct edid *edid;
4589
4590 edid = intel_connector->detect_edid;
4591 if (edid) {
4592 int ret = intel_connector_update_modes(connector, edid);
4593 if (ret)
4594 return ret;
4595 }
4596
4597 /* if eDP has no EDID, fall back to fixed mode */
4598 if (is_edp(intel_attached_dp(connector)) &&
4599 intel_connector->panel.fixed_mode) {
4600 struct drm_display_mode *mode;
4601
4602 mode = drm_mode_duplicate(connector->dev,
4603 intel_connector->panel.fixed_mode);
4604 if (mode) {
4605 drm_mode_probed_add(connector, mode);
4606 return 1;
4607 }
4608 }
4609
4610 return 0;
4611 }
4612
4613 static bool
4614 intel_dp_detect_audio(struct drm_connector *connector)
4615 {
4616 bool has_audio = false;
4617 struct edid *edid;
4618
4619 edid = to_intel_connector(connector)->detect_edid;
4620 if (edid)
4621 has_audio = drm_detect_monitor_audio(edid);
4622
4623 return has_audio;
4624 }
4625
4626 static int
4627 intel_dp_set_property(struct drm_connector *connector,
4628 struct drm_property *property,
4629 uint64_t val)
4630 {
4631 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4632 struct intel_connector *intel_connector = to_intel_connector(connector);
4633 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4634 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4635 int ret;
4636
4637 ret = drm_object_property_set_value(&connector->base, property, val);
4638 if (ret)
4639 return ret;
4640
4641 if (property == dev_priv->force_audio_property) {
4642 int i = val;
4643 bool has_audio;
4644
4645 if (i == intel_dp->force_audio)
4646 return 0;
4647
4648 intel_dp->force_audio = i;
4649
4650 if (i == HDMI_AUDIO_AUTO)
4651 has_audio = intel_dp_detect_audio(connector);
4652 else
4653 has_audio = (i == HDMI_AUDIO_ON);
4654
4655 if (has_audio == intel_dp->has_audio)
4656 return 0;
4657
4658 intel_dp->has_audio = has_audio;
4659 goto done;
4660 }
4661
4662 if (property == dev_priv->broadcast_rgb_property) {
4663 bool old_auto = intel_dp->color_range_auto;
4664 uint32_t old_range = intel_dp->color_range;
4665
4666 switch (val) {
4667 case INTEL_BROADCAST_RGB_AUTO:
4668 intel_dp->color_range_auto = true;
4669 break;
4670 case INTEL_BROADCAST_RGB_FULL:
4671 intel_dp->color_range_auto = false;
4672 intel_dp->color_range = 0;
4673 break;
4674 case INTEL_BROADCAST_RGB_LIMITED:
4675 intel_dp->color_range_auto = false;
4676 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4677 break;
4678 default:
4679 return -EINVAL;
4680 }
4681
4682 if (old_auto == intel_dp->color_range_auto &&
4683 old_range == intel_dp->color_range)
4684 return 0;
4685
4686 goto done;
4687 }
4688
4689 if (is_edp(intel_dp) &&
4690 property == connector->dev->mode_config.scaling_mode_property) {
4691 if (val == DRM_MODE_SCALE_NONE) {
4692 DRM_DEBUG_KMS("no scaling not supported\n");
4693 return -EINVAL;
4694 }
4695
4696 if (intel_connector->panel.fitting_mode == val) {
4697 /* the eDP scaling property is not changed */
4698 return 0;
4699 }
4700 intel_connector->panel.fitting_mode = val;
4701
4702 goto done;
4703 }
4704
4705 return -EINVAL;
4706
4707 done:
4708 if (intel_encoder->base.crtc)
4709 intel_crtc_restore_mode(intel_encoder->base.crtc);
4710
4711 return 0;
4712 }
4713
4714 static void
4715 intel_dp_connector_destroy(struct drm_connector *connector)
4716 {
4717 struct intel_connector *intel_connector = to_intel_connector(connector);
4718
4719 kfree(intel_connector->detect_edid);
4720
4721 if (!IS_ERR_OR_NULL(intel_connector->edid))
4722 kfree(intel_connector->edid);
4723
4724 /* Can't call is_edp() since the encoder may have been destroyed
4725 * already. */
4726 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4727 intel_panel_fini(&intel_connector->panel);
4728
4729 drm_connector_cleanup(connector);
4730 kfree(connector);
4731 }
4732
4733 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4734 {
4735 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4736 struct intel_dp *intel_dp = &intel_dig_port->dp;
4737
4738 drm_dp_aux_unregister(&intel_dp->aux);
4739 intel_dp_mst_encoder_cleanup(intel_dig_port);
4740 if (is_edp(intel_dp)) {
4741 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4742 /*
4743 * vdd might still be enabled due to the delayed vdd off.
4744 * Make sure vdd is actually turned off here.
4745 */
4746 pps_lock(intel_dp);
4747 edp_panel_vdd_off_sync(intel_dp);
4748 pps_unlock(intel_dp);
4749
4750 if (intel_dp->edp_notifier.notifier_call) {
4751 unregister_reboot_notifier(&intel_dp->edp_notifier);
4752 intel_dp->edp_notifier.notifier_call = NULL;
4753 }
4754 }
4755 drm_encoder_cleanup(encoder);
4756 kfree(intel_dig_port);
4757 }
4758
4759 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4760 {
4761 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4762
4763 if (!is_edp(intel_dp))
4764 return;
4765
4766 /*
4767 * vdd might still be enabled due to the delayed vdd off.
4768 * Make sure vdd is actually turned off here.
4769 */
4770 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4771 pps_lock(intel_dp);
4772 edp_panel_vdd_off_sync(intel_dp);
4773 pps_unlock(intel_dp);
4774 }
4775
4776 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4777 {
4778 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4779 struct drm_device *dev = intel_dig_port->base.base.dev;
4780 struct drm_i915_private *dev_priv = dev->dev_private;
4781 enum intel_display_power_domain power_domain;
4782
4783 lockdep_assert_held(&dev_priv->pps_mutex);
4784
4785 if (!edp_have_panel_vdd(intel_dp))
4786 return;
4787
4788 /*
4789 * The VDD bit needs a power domain reference, so if the bit is
4790 * already enabled when we boot or resume, grab this reference and
4791 * schedule a vdd off, so we don't hold on to the reference
4792 * indefinitely.
4793 */
4794 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4795 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4796 intel_display_power_get(dev_priv, power_domain);
4797
4798 edp_panel_vdd_schedule_off(intel_dp);
4799 }
4800
4801 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4802 {
4803 struct intel_dp *intel_dp;
4804
4805 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4806 return;
4807
4808 intel_dp = enc_to_intel_dp(encoder);
4809
4810 pps_lock(intel_dp);
4811
4812 /*
4813 * Read out the current power sequencer assignment,
4814 * in case the BIOS did something with it.
4815 */
4816 if (IS_VALLEYVIEW(encoder->dev))
4817 vlv_initial_power_sequencer_setup(intel_dp);
4818
4819 intel_edp_panel_vdd_sanitize(intel_dp);
4820
4821 pps_unlock(intel_dp);
4822 }
4823
4824 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4825 .dpms = intel_connector_dpms,
4826 .detect = intel_dp_detect,
4827 .force = intel_dp_force,
4828 .fill_modes = drm_helper_probe_single_connector_modes,
4829 .set_property = intel_dp_set_property,
4830 .atomic_get_property = intel_connector_atomic_get_property,
4831 .destroy = intel_dp_connector_destroy,
4832 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4833 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4834 };
4835
4836 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4837 .get_modes = intel_dp_get_modes,
4838 .mode_valid = intel_dp_mode_valid,
4839 .best_encoder = intel_best_encoder,
4840 };
4841
4842 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4843 .reset = intel_dp_encoder_reset,
4844 .destroy = intel_dp_encoder_destroy,
4845 };
4846
4847 enum irqreturn
4848 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4849 {
4850 struct intel_dp *intel_dp = &intel_dig_port->dp;
4851 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4852 struct drm_device *dev = intel_dig_port->base.base.dev;
4853 struct drm_i915_private *dev_priv = dev->dev_private;
4854 enum intel_display_power_domain power_domain;
4855 enum irqreturn ret = IRQ_NONE;
4856
4857 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4858 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4859
4860 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4861 /*
4862 * vdd off can generate a long pulse on eDP which
4863 * would require vdd on to handle it, and thus we
4864 * would end up in an endless cycle of
4865 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4866 */
4867 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4868 port_name(intel_dig_port->port));
4869 return IRQ_HANDLED;
4870 }
4871
4872 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4873 port_name(intel_dig_port->port),
4874 long_hpd ? "long" : "short");
4875
4876 power_domain = intel_display_port_power_domain(intel_encoder);
4877 intel_display_power_get(dev_priv, power_domain);
4878
4879 if (long_hpd) {
4880 /* indicate that we need to restart link training */
4881 intel_dp->train_set_valid = false;
4882
4883 if (HAS_PCH_SPLIT(dev)) {
4884 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4885 goto mst_fail;
4886 } else {
4887 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4888 goto mst_fail;
4889 }
4890
4891 if (!intel_dp_get_dpcd(intel_dp)) {
4892 goto mst_fail;
4893 }
4894
4895 intel_dp_probe_oui(intel_dp);
4896
4897 if (!intel_dp_probe_mst(intel_dp))
4898 goto mst_fail;
4899
4900 } else {
4901 if (intel_dp->is_mst) {
4902 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4903 goto mst_fail;
4904 }
4905
4906 if (!intel_dp->is_mst) {
4907 /*
4908 * we'll check the link status via the normal hot plug path later -
4909 * but for short hpds we should check it now
4910 */
4911 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4912 intel_dp_check_link_status(intel_dp);
4913 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4914 }
4915 }
4916
4917 ret = IRQ_HANDLED;
4918
4919 goto put_power;
4920 mst_fail:
4921 /* if we were in MST mode, and device is not there get out of MST mode */
4922 if (intel_dp->is_mst) {
4923 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4924 intel_dp->is_mst = false;
4925 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4926 }
4927 put_power:
4928 intel_display_power_put(dev_priv, power_domain);
4929
4930 return ret;
4931 }
4932
4933 /* Return which DP Port should be selected for Transcoder DP control */
4934 int
4935 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4936 {
4937 struct drm_device *dev = crtc->dev;
4938 struct intel_encoder *intel_encoder;
4939 struct intel_dp *intel_dp;
4940
4941 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4942 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4943
4944 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4945 intel_encoder->type == INTEL_OUTPUT_EDP)
4946 return intel_dp->output_reg;
4947 }
4948
4949 return -1;
4950 }
4951
4952 /* check the VBT to see whether the eDP is on one of the DP-B/C/D ports */
4953 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4954 {
4955 struct drm_i915_private *dev_priv = dev->dev_private;
4956 union child_device_config *p_child;
4957 int i;
4958 static const short port_mapping[] = {
4959 [PORT_B] = PORT_IDPB,
4960 [PORT_C] = PORT_IDPC,
4961 [PORT_D] = PORT_IDPD,
4962 };
4963
4964 if (port == PORT_A)
4965 return true;
4966
4967 if (!dev_priv->vbt.child_dev_num)
4968 return false;
4969
4970 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4971 p_child = dev_priv->vbt.child_dev + i;
4972
4973 if (p_child->common.dvo_port == port_mapping[port] &&
4974 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4975 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4976 return true;
4977 }
4978 return false;
4979 }
4980
4981 void
4982 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4983 {
4984 struct intel_connector *intel_connector = to_intel_connector(connector);
4985
4986 intel_attach_force_audio_property(connector);
4987 intel_attach_broadcast_rgb_property(connector);
4988 intel_dp->color_range_auto = true;
4989
4990 if (is_edp(intel_dp)) {
4991 drm_mode_create_scaling_mode_property(connector->dev);
4992 drm_object_attach_property(
4993 &connector->base,
4994 connector->dev->mode_config.scaling_mode_property,
4995 DRM_MODE_SCALE_ASPECT);
4996 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4997 }
4998 }
4999
5000 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5001 {
5002 intel_dp->last_power_cycle = jiffies;
5003 intel_dp->last_power_on = jiffies;
5004 intel_dp->last_backlight_off = jiffies;
5005 }
5006
5007 static void
5008 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5009 struct intel_dp *intel_dp)
5010 {
5011 struct drm_i915_private *dev_priv = dev->dev_private;
5012 struct edp_power_seq cur, vbt, spec,
5013 *final = &intel_dp->pps_delays;
5014 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5015 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5016
5017 lockdep_assert_held(&dev_priv->pps_mutex);
5018
5019 /* already initialized? */
5020 if (final->t11_t12 != 0)
5021 return;
5022
5023 if (IS_BROXTON(dev)) {
5024 /*
5025 * TODO: BXT has 2 sets of PPS registers.
5026 * The correct register set for Broxton needs to be identified
5027 * using the VBT; hardcoded for now.
5028 */
5029 pp_ctrl_reg = BXT_PP_CONTROL(0);
5030 pp_on_reg = BXT_PP_ON_DELAYS(0);
5031 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5032 } else if (HAS_PCH_SPLIT(dev)) {
5033 pp_ctrl_reg = PCH_PP_CONTROL;
5034 pp_on_reg = PCH_PP_ON_DELAYS;
5035 pp_off_reg = PCH_PP_OFF_DELAYS;
5036 pp_div_reg = PCH_PP_DIVISOR;
5037 } else {
5038 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5039
5040 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5041 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5042 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5043 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5044 }
5045
5046 /* Workaround: Need to write PP_CONTROL with the unlock key as
5047 * the very first thing. */
5048 pp_ctl = ironlake_get_pp_control(intel_dp);
5049
5050 pp_on = I915_READ(pp_on_reg);
5051 pp_off = I915_READ(pp_off_reg);
5052 if (!IS_BROXTON(dev)) {
5053 I915_WRITE(pp_ctrl_reg, pp_ctl);
5054 pp_div = I915_READ(pp_div_reg);
5055 }
5056
5057 /* Pull timing values out of registers */
5058 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5059 PANEL_POWER_UP_DELAY_SHIFT;
5060
5061 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5062 PANEL_LIGHT_ON_DELAY_SHIFT;
5063
5064 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5065 PANEL_LIGHT_OFF_DELAY_SHIFT;
5066
5067 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5068 PANEL_POWER_DOWN_DELAY_SHIFT;
5069
5070 if (IS_BROXTON(dev)) {
5071 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5072 BXT_POWER_CYCLE_DELAY_SHIFT;
5073 if (tmp > 0)
5074 cur.t11_t12 = (tmp - 1) * 1000;
5075 else
5076 cur.t11_t12 = 0;
5077 } else {
5078 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5079 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5080 }
5081
5082 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5083 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5084
5085 vbt = dev_priv->vbt.edp_pps;
5086
5087 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5088 * our hw here, which are all in 100usec. */
5089 spec.t1_t3 = 210 * 10;
5090 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5091 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5092 spec.t10 = 500 * 10;
5093 /* This one is special and actually in units of 100ms, but zero
5094 * based in the hw (so we need to add 100 ms). But the sw vbt
5095 * table multiplies it by 1000 to make it in units of 100 usec,
5096 * too. */
5097 spec.t11_t12 = (510 + 100) * 10;
5098
5099 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5100 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5101
5102 /* Use the max of the register settings and vbt. If both are
5103 * unset, fall back to the spec limits. */
5104 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5105 spec.field : \
5106 max(cur.field, vbt.field))
5107 assign_final(t1_t3);
5108 assign_final(t8);
5109 assign_final(t9);
5110 assign_final(t10);
5111 assign_final(t11_t12);
5112 #undef assign_final
5113
5114 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5115 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5116 intel_dp->backlight_on_delay = get_delay(t8);
5117 intel_dp->backlight_off_delay = get_delay(t9);
5118 intel_dp->panel_power_down_delay = get_delay(t10);
5119 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5120 #undef get_delay
5121
5122 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5123 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5124 intel_dp->panel_power_cycle_delay);
5125
5126 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5127 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5128 }
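/*
 * Standalone sketch of the per-field resolution above, all in the
 * hardware's 100 us units: the current register value and the VBT value
 * are merged with max(), and the eDP spec limit is used only when both
 * are zero; the driver then rounds up to ms for its software waits. The
 * sketch_* name is illustrative.
 */
#include <stdint.h>

static uint16_t sketch_resolve_delay(uint16_t cur, uint16_t vbt,
				     uint16_t spec)
{
	uint16_t m = cur > vbt ? cur : vbt;

	return m ? m : spec;	/* fall back to spec when both unset */
}

/* e.g. t1_t3: cur = 0, vbt = 2100 -> 2100 (210 ms after rounding up) */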
5129
5130 static void
5131 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5132 struct intel_dp *intel_dp)
5133 {
5134 struct drm_i915_private *dev_priv = dev->dev_private;
5135 u32 pp_on, pp_off, pp_div, port_sel = 0;
5136 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5137 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5138 enum port port = dp_to_dig_port(intel_dp)->port;
5139 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5140
5141 lockdep_assert_held(&dev_priv->pps_mutex);
5142
5143 if (IS_BROXTON(dev)) {
5144 /*
5145 * TODO: BXT has 2 sets of PPS registers.
5146 * The correct register set for Broxton needs to be identified
5147 * using the VBT; hardcoded for now.
5148 */
5149 pp_ctrl_reg = BXT_PP_CONTROL(0);
5150 pp_on_reg = BXT_PP_ON_DELAYS(0);
5151 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5152
5153 } else if (HAS_PCH_SPLIT(dev)) {
5154 pp_on_reg = PCH_PP_ON_DELAYS;
5155 pp_off_reg = PCH_PP_OFF_DELAYS;
5156 pp_div_reg = PCH_PP_DIVISOR;
5157 } else {
5158 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5159
5160 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5161 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5162 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5163 }
5164
5165 /*
5166 * And finally store the new values in the power sequencer. The
5167 * backlight delays are set to 1 because we do manual waits on them. For
5168 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5169 * we'll end up waiting for the backlight off delay twice: once when we
5170 * do the manual sleep, and once when we disable the panel and wait for
5171 * the PP_STATUS bit to become zero.
5172 */
5173 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5174 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5175 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5176 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5177 /* Compute the divisor for the pp clock, simply match the Bspec
5178 * formula. */
5179 if (IS_BROXTON(dev)) {
5180 pp_div = I915_READ(pp_ctrl_reg);
5181 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5182 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5183 << BXT_POWER_CYCLE_DELAY_SHIFT);
5184 } else {
5185 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5186 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5187 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5188 }
5189
5190 /* Haswell doesn't have any port selection bits for the panel
5191 * power sequencer any more. */
5192 if (IS_VALLEYVIEW(dev)) {
5193 port_sel = PANEL_PORT_SELECT_VLV(port);
5194 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5195 if (port == PORT_A)
5196 port_sel = PANEL_PORT_SELECT_DPA;
5197 else
5198 port_sel = PANEL_PORT_SELECT_DPD;
5199 }
5200
5201 pp_on |= port_sel;
5202
5203 I915_WRITE(pp_on_reg, pp_on);
5204 I915_WRITE(pp_off_reg, pp_off);
5205 if (IS_BROXTON(dev))
5206 I915_WRITE(pp_ctrl_reg, pp_div);
5207 else
5208 I915_WRITE(pp_div_reg, pp_div);
5209
5210 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5211 I915_READ(pp_on_reg),
5212 I915_READ(pp_off_reg),
5213 IS_BROXTON(dev) ?
5214 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5215 I915_READ(pp_div_reg));
5216 }
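/*
 * Sketch of the non-BXT PP_DIVISOR math above, where rawclk is whatever
 * intel_pch_rawclk()/intel_hrawclk() returns for the platform and the
 * field shifts are illustrative stand-ins for the real
 * PP_REFERENCE_DIVIDER_SHIFT / PANEL_POWER_CYCLE_DELAY_SHIFT macros:
 * t11_t12 (100 us units) is rounded up to whole 100 ms steps for the
 * power-cycle field.
 */
#include <stdint.h>

static uint32_t sketch_pp_div(int rawclk, int t11_t12)
{
	uint32_t ref = (100 * rawclk) / 2 - 1;		/* reference divider */
	uint32_t cycle = (t11_t12 + 999) / 1000;	/* DIV_ROUND_UP */

	return ref << 8 | cycle;
}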
5217
5218 /**
5219 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5220 * @dev: DRM device
5221 * @refresh_rate: RR to be programmed
5222 *
5223 * This function gets called when refresh rate (RR) has to be changed from
5224 * one frequency to another. Switches can be between high and low RR
5225 * supported by the panel or to any other RR based on media playback (in
5226 * this case, RR value needs to be passed from user space).
5227 *
5228 * The caller of this function needs to take a lock on dev_priv->drrs.
5229 */
5230 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5231 {
5232 struct drm_i915_private *dev_priv = dev->dev_private;
5233 struct intel_encoder *encoder;
5234 struct intel_digital_port *dig_port = NULL;
5235 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5236 struct intel_crtc_state *config = NULL;
5237 struct intel_crtc *intel_crtc = NULL;
5238 u32 reg, val;
5239 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5240
5241 if (refresh_rate <= 0) {
5242 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5243 return;
5244 }
5245
5246 if (intel_dp == NULL) {
5247 DRM_DEBUG_KMS("DRRS not supported.\n");
5248 return;
5249 }
5250
5251 /*
5252 * FIXME: This needs proper synchronization with psr state for some
5253 * platforms that cannot have PSR and DRRS enabled at the same time.
5254 */
5255
5256 dig_port = dp_to_dig_port(intel_dp);
5257 encoder = &dig_port->base;
5258 intel_crtc = to_intel_crtc(encoder->base.crtc);
5259
5260 if (!intel_crtc) {
5261 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5262 return;
5263 }
5264
5265 config = intel_crtc->config;
5266
5267 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5268 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5269 return;
5270 }
5271
5272 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5273 refresh_rate)
5274 index = DRRS_LOW_RR;
5275
5276 if (index == dev_priv->drrs.refresh_rate_type) {
5277 DRM_DEBUG_KMS(
5278 "DRRS requested for previously set RR...ignoring\n");
5279 return;
5280 }
5281
5282 if (!intel_crtc->active) {
5283 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5284 return;
5285 }
5286
5287 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5288 switch (index) {
5289 case DRRS_HIGH_RR:
5290 intel_dp_set_m_n(intel_crtc, M1_N1);
5291 break;
5292 case DRRS_LOW_RR:
5293 intel_dp_set_m_n(intel_crtc, M2_N2);
5294 break;
5295 case DRRS_MAX_RR:
5296 default:
5297 DRM_ERROR("Unsupported refreshrate type\n");
5298 }
5299 } else if (INTEL_INFO(dev)->gen > 6) {
5300 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5301 val = I915_READ(reg);
5302
5303 if (index > DRRS_HIGH_RR) {
5304 if (IS_VALLEYVIEW(dev))
5305 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5306 else
5307 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5308 } else {
5309 if (IS_VALLEYVIEW(dev))
5310 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5311 else
5312 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5313 }
5314 I915_WRITE(reg, val);
5315 }
5316
5317 dev_priv->drrs.refresh_rate_type = index;
5318
5319 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5320 }
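/*
 * Sketch of the platform split implemented above: gen >= 8 (except CHV)
 * switches between the two pre-computed link M/N divider sets, while
 * gen 7 platforms toggle a PIPECONF mode-switch bit instead. The
 * sketch_* names are illustrative.
 */
enum sketch_drrs_method {
	SKETCH_DRRS_M_N,	/* reprogram M1_N1 / M2_N2 */
	SKETCH_DRRS_PIPECONF,	/* toggle EDP_RR_MODE_SWITCH in PIPECONF */
	SKETCH_DRRS_NONE,
};

static enum sketch_drrs_method sketch_drrs_method(int gen, int is_chv)
{
	if (gen >= 8 && !is_chv)
		return SKETCH_DRRS_M_N;
	if (gen > 6)
		return SKETCH_DRRS_PIPECONF;
	return SKETCH_DRRS_NONE;
}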
5321
5322 /**
5323 * intel_edp_drrs_enable - init drrs struct if supported
5324 * @intel_dp: DP struct
5325 *
5326 * Initializes frontbuffer_bits and drrs.dp
5327 */
5328 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5329 {
5330 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5331 struct drm_i915_private *dev_priv = dev->dev_private;
5332 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5333 struct drm_crtc *crtc = dig_port->base.base.crtc;
5334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5335
5336 if (!intel_crtc->config->has_drrs) {
5337 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5338 return;
5339 }
5340
5341 mutex_lock(&dev_priv->drrs.mutex);
5342 if (WARN_ON(dev_priv->drrs.dp)) {
5343 DRM_ERROR("DRRS already enabled\n");
5344 goto unlock;
5345 }
5346
5347 dev_priv->drrs.busy_frontbuffer_bits = 0;
5348
5349 dev_priv->drrs.dp = intel_dp;
5350
5351 unlock:
5352 mutex_unlock(&dev_priv->drrs.mutex);
5353 }
5354
5355 /**
5356 * intel_edp_drrs_disable - Disable DRRS
5357 * @intel_dp: DP struct
5358 *
5359 */
5360 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5361 {
5362 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5363 struct drm_i915_private *dev_priv = dev->dev_private;
5364 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5365 struct drm_crtc *crtc = dig_port->base.base.crtc;
5366 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5367
5368 if (!intel_crtc->config->has_drrs)
5369 return;
5370
5371 mutex_lock(&dev_priv->drrs.mutex);
5372 if (!dev_priv->drrs.dp) {
5373 mutex_unlock(&dev_priv->drrs.mutex);
5374 return;
5375 }
5376
5377 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5378 intel_dp_set_drrs_state(dev_priv->dev,
5379 intel_dp->attached_connector->panel.
5380 fixed_mode->vrefresh);
5381
5382 dev_priv->drrs.dp = NULL;
5383 mutex_unlock(&dev_priv->drrs.mutex);
5384
5385 cancel_delayed_work_sync(&dev_priv->drrs.work);
5386 }
5387
5388 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5389 {
5390 struct drm_i915_private *dev_priv =
5391 container_of(work, typeof(*dev_priv), drrs.work.work);
5392 struct intel_dp *intel_dp;
5393
5394 mutex_lock(&dev_priv->drrs.mutex);
5395
5396 intel_dp = dev_priv->drrs.dp;
5397
5398 if (!intel_dp)
5399 goto unlock;
5400
5401 /*
5402 * The delayed work can race with an invalidate, hence we need to
5403 * recheck.
5404 */
5405
5406 if (dev_priv->drrs.busy_frontbuffer_bits)
5407 goto unlock;
5408
5409 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5410 intel_dp_set_drrs_state(dev_priv->dev,
5411 intel_dp->attached_connector->panel.
5412 downclock_mode->vrefresh);
5413
5414 unlock:
5415 mutex_unlock(&dev_priv->drrs.mutex);
5416 }
5417
5418 /**
5419 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5420 * @dev: DRM device
5421 * @frontbuffer_bits: frontbuffer plane tracking bits
5422 *
5423 * This function gets called every time rendering on the given planes
5424 * starts. Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5425 *
5426 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5427 */
5428 void intel_edp_drrs_invalidate(struct drm_device *dev,
5429 unsigned frontbuffer_bits)
5430 {
5431 struct drm_i915_private *dev_priv = dev->dev_private;
5432 struct drm_crtc *crtc;
5433 enum pipe pipe;
5434
5435 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5436 return;
5437
5438 cancel_delayed_work(&dev_priv->drrs.work);
5439
5440 mutex_lock(&dev_priv->drrs.mutex);
5441 if (!dev_priv->drrs.dp) {
5442 mutex_unlock(&dev_priv->drrs.mutex);
5443 return;
5444 }
5445
5446 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5447 pipe = to_intel_crtc(crtc)->pipe;
5448
5449 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5450 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5451
5452 /* invalidate means busy screen hence upclock */
5453 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5454 intel_dp_set_drrs_state(dev_priv->dev,
5455 dev_priv->drrs.dp->attached_connector->panel.
5456 fixed_mode->vrefresh);
5457
5458 mutex_unlock(&dev_priv->drrs.mutex);
5459 }
5460
5461 /**
5462 * intel_edp_drrs_flush - Restart Idleness DRRS
5463 * @dev: DRM device
5464 * @frontbuffer_bits: frontbuffer plane tracking bits
5465 *
5466 * This function gets called every time rendering on the given planes
5467 * has completed or a flip on a crtc is completed. So DRRS should be
5468 * upclocked (LOW_RR -> HIGH_RR), and idleness detection should be
5469 * restarted if no other planes are dirty.
5470 *
5471 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5472 */
5473 void intel_edp_drrs_flush(struct drm_device *dev,
5474 unsigned frontbuffer_bits)
5475 {
5476 struct drm_i915_private *dev_priv = dev->dev_private;
5477 struct drm_crtc *crtc;
5478 enum pipe pipe;
5479
5480 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5481 return;
5482
5483 cancel_delayed_work(&dev_priv->drrs.work);
5484
5485 mutex_lock(&dev_priv->drrs.mutex);
5486 if (!dev_priv->drrs.dp) {
5487 mutex_unlock(&dev_priv->drrs.mutex);
5488 return;
5489 }
5490
5491 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5492 pipe = to_intel_crtc(crtc)->pipe;
5493
5494 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5495 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5496
5497 /* flush means busy screen hence upclock */
5498 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5499 intel_dp_set_drrs_state(dev_priv->dev,
5500 dev_priv->drrs.dp->attached_connector->panel.
5501 fixed_mode->vrefresh);
5502
5503 /*
5504 * flush also means no more activity hence schedule downclock, if all
5505 * other fbs are quiescent too
5506 */
5507 if (!dev_priv->drrs.busy_frontbuffer_bits)
5508 schedule_delayed_work(&dev_priv->drrs.work,
5509 msecs_to_jiffies(1000));
5510 mutex_unlock(&dev_priv->drrs.mutex);
5511 }
5512
5513 /**
5514 * DOC: Display Refresh Rate Switching (DRRS)
5515 *
5516 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5517 * which enables switching between low and high refresh rates
5518 * dynamically, based on the usage scenario. This feature is applicable
5519 * to internal panels.
5520 *
5521 * Indication that the panel supports DRRS is given by the panel EDID, which
5522 * would list multiple refresh rates for one resolution.
5523 *
5524 * DRRS is of 2 types - static and seamless.
5525 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5526 * (may appear as a blink on screen) and is used in dock-undock scenario.
5527 * Seamless DRRS involves changing RR without any visual effect to the user
5528 * and can be used during normal system usage. This is done by programming
5529 * certain registers.
5530 *
5531 * Support for static/seamless DRRS may be indicated in the VBT based on
5532 * inputs from the panel spec.
5533 *
5534 * DRRS saves power by switching to low RR based on usage scenarios.
5535 *
5536 * eDP DRRS:
5537 * The implementation is based on frontbuffer tracking.
5538 * When there is a disturbance on the screen triggered by user activity or a
5539 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5540 * When there is no movement on screen, after a timeout of 1 second, a switch
5541 * to low RR is made.
5542 * For integration with frontbuffer tracking code,
5543 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called
5544 * (see the sketch following this comment block).
5544 *
5545 * DRRS can be further extended to support other internal panels and also
5546 * the scenario of video playback wherein RR is set based on the rate
5547 * requested by userspace.
5548 */
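/*
 * Illustrative sketch only, not driver code: a hypothetical helper
 * showing the call order the frontbuffer tracking code is expected to
 * follow. "example_fb_bits" is a made-up name standing in for the
 * plane's frontbuffer bit mask.
 */
static inline void example_drrs_frontbuffer_cycle(struct drm_device *dev,
						  unsigned example_fb_bits)
{
	/* Rendering starts: upclock back to the high refresh rate. */
	intel_edp_drrs_invalidate(dev, example_fb_bits);

	/* ... frontbuffer writes happen here ... */

	/*
	 * Rendering done: upclock if still needed and re-arm the 1 second
	 * idleness timer that downclocks via
	 * intel_edp_drrs_downclock_work().
	 */
	intel_edp_drrs_flush(dev, example_fb_bits);
}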
5549
5550 /**
5551 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5552 * @intel_connector: eDP connector
5553 * @fixed_mode: preferred mode of panel
5554 *
5555 * This function is called only once at driver load to initialize basic
5556 * DRRS state.
5557 *
5558 * Returns:
5559 * Downclock mode if the panel supports it, else NULL.
5560 * DRRS support is determined by the presence of downclock mode (apart
5561 * from VBT setting).
5562 */
5563 static struct drm_display_mode *
5564 intel_dp_drrs_init(struct intel_connector *intel_connector,
5565 struct drm_display_mode *fixed_mode)
5566 {
5567 struct drm_connector *connector = &intel_connector->base;
5568 struct drm_device *dev = connector->dev;
5569 struct drm_i915_private *dev_priv = dev->dev_private;
5570 struct drm_display_mode *downclock_mode = NULL;
5571
5572 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5573 mutex_init(&dev_priv->drrs.mutex);
5574
5575 if (INTEL_INFO(dev)->gen <= 6) {
5576 DRM_DEBUG_KMS("DRRS supported only for Gen7 and above\n");
5577 return NULL;
5578 }
5579
5580 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5581 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5582 return NULL;
5583 }
5584
5585 downclock_mode = intel_find_panel_downclock(dev, fixed_mode, connector);
5587
5588 if (!downclock_mode) {
5589 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
5590 return NULL;
5591 }
5592
5593 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5594
5595 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5596 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5597 return downclock_mode;
5598 }
5599
5600 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5601 struct intel_connector *intel_connector)
5602 {
5603 struct drm_connector *connector = &intel_connector->base;
5604 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5605 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5606 struct drm_device *dev = intel_encoder->base.dev;
5607 struct drm_i915_private *dev_priv = dev->dev_private;
5608 struct drm_display_mode *fixed_mode = NULL;
5609 struct drm_display_mode *downclock_mode = NULL;
5610 bool has_dpcd;
5611 struct drm_display_mode *scan;
5612 struct edid *edid;
5613 enum pipe pipe = INVALID_PIPE;
5614
5615 if (!is_edp(intel_dp))
5616 return true;
5617
5618 pps_lock(intel_dp);
5619 intel_edp_panel_vdd_sanitize(intel_dp);
5620 pps_unlock(intel_dp);
5621
5622 /* Cache DPCD and EDID for edp. */
5623 has_dpcd = intel_dp_get_dpcd(intel_dp);
5624
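	/*
	 * Note: per the DP spec, DPCD 1.1+ sinks advertise support for
	 * link training without the AUX handshake through the
	 * NO_AUX_HANDSHAKE_LINK_TRAINING bit in MAX_DOWNSPREAD, which is
	 * what the check below caches.
	 */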
5625 if (has_dpcd) {
5626 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5627 dev_priv->no_aux_handshake =
5628 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5629 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5630 } else {
5631 /* if this fails, presume the device is a ghost */
5632 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5633 return false;
5634 }
5635
5636 /* We now know it's not a ghost, init power sequence regs. */
5637 pps_lock(intel_dp);
5638 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5639 pps_unlock(intel_dp);
5640
5641 mutex_lock(&dev->mode_config.mutex);
5642 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5643 if (edid) {
5644 if (drm_add_edid_modes(connector, edid)) {
5645 drm_mode_connector_update_edid_property(connector,
5646 edid);
5647 drm_edid_to_eld(connector, edid);
5648 } else {
5649 kfree(edid);
5650 edid = ERR_PTR(-EINVAL);
5651 }
5652 } else {
5653 edid = ERR_PTR(-ENOENT);
5654 }
5655 intel_connector->edid = edid;
5656
5657 /* prefer fixed mode from EDID if available */
5658 list_for_each_entry(scan, &connector->probed_modes, head) {
5659 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5660 fixed_mode = drm_mode_duplicate(dev, scan);
5661 downclock_mode = intel_dp_drrs_init(
5662 intel_connector, fixed_mode);
5663 break;
5664 }
5665 }
5666
5667 /* fallback to VBT if available for eDP */
5668 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5669 fixed_mode = drm_mode_duplicate(dev,
5670 dev_priv->vbt.lfp_lvds_vbt_mode);
5671 if (fixed_mode)
5672 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5673 }
5674 mutex_unlock(&dev->mode_config.mutex);
5675
5676 if (IS_VALLEYVIEW(dev)) {
5677 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5678 register_reboot_notifier(&intel_dp->edp_notifier);
5679
5680 /*
5681 * Figure out the current pipe for the initial backlight setup.
5682 * If the current pipe isn't valid, try the PPS pipe, and if that
5683 * fails just assume pipe A.
5684 */
5685 if (IS_CHERRYVIEW(dev))
5686 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5687 else
5688 pipe = PORT_TO_PIPE(intel_dp->DP);
5689
5690 if (pipe != PIPE_A && pipe != PIPE_B)
5691 pipe = intel_dp->pps_pipe;
5692
5693 if (pipe != PIPE_A && pipe != PIPE_B)
5694 pipe = PIPE_A;
5695
5696 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5697 pipe_name(pipe));
5698 }
5699
5700 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5701 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5702 intel_panel_setup_backlight(connector, pipe);
5703
5704 return true;
5705 }
5706
5707 bool
5708 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5709 struct intel_connector *intel_connector)
5710 {
5711 struct drm_connector *connector = &intel_connector->base;
5712 struct intel_dp *intel_dp = &intel_dig_port->dp;
5713 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5714 struct drm_device *dev = intel_encoder->base.dev;
5715 struct drm_i915_private *dev_priv = dev->dev_private;
5716 enum port port = intel_dig_port->port;
5717 int type;
5718
5719 intel_dp->pps_pipe = INVALID_PIPE;
5720
5721 /* intel_dp vfuncs */
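	/*
	 * How the AUX clock divider is derived and how the AUX control
	 * register is programmed differ per platform, so select the
	 * helpers up front.
	 */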
5722 if (INTEL_INFO(dev)->gen >= 9)
5723 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5724 else if (IS_VALLEYVIEW(dev))
5725 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5726 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5727 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5728 else if (HAS_PCH_SPLIT(dev))
5729 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5730 else
5731 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5732
5733 if (INTEL_INFO(dev)->gen >= 9)
5734 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5735 else
5736 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5737
5738 /* Preserve the current hw state. */
5739 intel_dp->DP = I915_READ(intel_dp->output_reg);
5740 intel_dp->attached_connector = intel_connector;
5741
5742 if (intel_dp_is_edp(dev, port))
5743 type = DRM_MODE_CONNECTOR_eDP;
5744 else
5745 type = DRM_MODE_CONNECTOR_DisplayPort;
5746
5747 /*
5748 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5749 * for DP the encoder type can be set by the caller to
5750 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5751 */
5752 if (type == DRM_MODE_CONNECTOR_eDP)
5753 intel_encoder->type = INTEL_OUTPUT_EDP;
5754
5755 /* eDP only on port B and/or C on vlv/chv */
5756 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5757 port != PORT_B && port != PORT_C))
5758 return false;
5759
5760 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5761 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5762 port_name(port));
5763
5764 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5765 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5766
5767 connector->interlace_allowed = true;
5768 connector->doublescan_allowed = 0;
5769
5770 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5771 edp_panel_vdd_work);
5772
5773 intel_connector_attach_encoder(intel_connector, intel_encoder);
5774 drm_connector_register(connector);
5775
5776 if (HAS_DDI(dev))
5777 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5778 else
5779 intel_connector->get_hw_state = intel_connector_get_hw_state;
5780 intel_connector->unregister = intel_dp_connector_unregister;
5781
5782 /* Set up the hotplug pin. */
5783 switch (port) {
5784 case PORT_A:
5785 intel_encoder->hpd_pin = HPD_PORT_A;
5786 break;
5787 case PORT_B:
5788 intel_encoder->hpd_pin = HPD_PORT_B;
5789 break;
5790 case PORT_C:
5791 intel_encoder->hpd_pin = HPD_PORT_C;
5792 break;
5793 case PORT_D:
5794 intel_encoder->hpd_pin = HPD_PORT_D;
5795 break;
5796 default:
5797 BUG();
5798 }
5799
5800 if (is_edp(intel_dp)) {
5801 pps_lock(intel_dp);
5802 intel_dp_init_panel_power_timestamps(intel_dp);
5803 if (IS_VALLEYVIEW(dev))
5804 vlv_initial_power_sequencer_setup(intel_dp);
5805 else
5806 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5807 pps_unlock(intel_dp);
5808 }
5809
5810 intel_dp_aux_init(intel_dp, intel_connector);
5811
5812 /* init MST on ports that can support it */
5813 if (HAS_DP_MST(dev) &&
5814 (port == PORT_B || port == PORT_C || port == PORT_D))
5815 intel_dp_mst_encoder_init(intel_dig_port,
5816 intel_connector->base.base.id);
5817
5818 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5819 drm_dp_aux_unregister(&intel_dp->aux);
5820 if (is_edp(intel_dp)) {
5821 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5822 /*
5823 * vdd might still be enabled due to the delayed vdd off.
5824 * Make sure vdd is actually turned off here.
5825 */
5826 pps_lock(intel_dp);
5827 edp_panel_vdd_off_sync(intel_dp);
5828 pps_unlock(intel_dp);
5829 }
5830 drm_connector_unregister(connector);
5831 drm_connector_cleanup(connector);
5832 return false;
5833 }
5834
5835 intel_dp_add_properties(intel_dp, connector);
5836
5837 /* For the G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5838 * 0xd. Failure to do so will result in spurious interrupts being
5839 * generated on the port when a cable is not attached.
5840 */
5841 if (IS_G4X(dev) && !IS_GM45(dev)) {
5842 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5843 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5844 }
5845
5846 i915_debugfs_connector_add(connector);
5847
5848 return true;
5849 }
5850
5851 void
5852 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5853 {
5854 struct drm_i915_private *dev_priv = dev->dev_private;
5855 struct intel_digital_port *intel_dig_port;
5856 struct intel_encoder *intel_encoder;
5857 struct drm_encoder *encoder;
5858 struct intel_connector *intel_connector;
5859
5860 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5861 if (!intel_dig_port)
5862 return;
5863
5864 intel_connector = intel_connector_alloc();
5865 if (!intel_connector) {
5866 kfree(intel_dig_port);
5867 return;
5868 }
5869
5870 intel_encoder = &intel_dig_port->base;
5871 encoder = &intel_encoder->base;
5872
5873 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5874 DRM_MODE_ENCODER_TMDS);
5875
5876 intel_encoder->compute_config = intel_dp_compute_config;
5877 intel_encoder->disable = intel_disable_dp;
5878 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5879 intel_encoder->get_config = intel_dp_get_config;
5880 intel_encoder->suspend = intel_dp_encoder_suspend;
5881 if (IS_CHERRYVIEW(dev)) {
5882 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5883 intel_encoder->pre_enable = chv_pre_enable_dp;
5884 intel_encoder->enable = vlv_enable_dp;
5885 intel_encoder->post_disable = chv_post_disable_dp;
5886 } else if (IS_VALLEYVIEW(dev)) {
5887 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5888 intel_encoder->pre_enable = vlv_pre_enable_dp;
5889 intel_encoder->enable = vlv_enable_dp;
5890 intel_encoder->post_disable = vlv_post_disable_dp;
5891 } else {
5892 intel_encoder->pre_enable = g4x_pre_enable_dp;
5893 intel_encoder->enable = g4x_enable_dp;
5894 if (INTEL_INFO(dev)->gen >= 5)
5895 intel_encoder->post_disable = ilk_post_disable_dp;
5896 }
5897
5898 intel_dig_port->port = port;
5899 intel_dig_port->dp.output_reg = output_reg;
5900
5901 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
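	/* On CHV, port D can only drive pipe C; ports B/C drive pipes A/B. */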
5902 if (IS_CHERRYVIEW(dev)) {
5903 if (port == PORT_D)
5904 intel_encoder->crtc_mask = 1 << 2;
5905 else
5906 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5907 } else {
5908 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5909 }
5910 intel_encoder->cloneable = 0;
5911
5912 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5913 dev_priv->hotplug.irq_port[port] = intel_dig_port;
5914
5915 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5916 drm_encoder_cleanup(encoder);
5917 kfree(intel_dig_port);
5918 kfree(intel_connector);
5919 }
5920 }
5921
5922 void intel_dp_mst_suspend(struct drm_device *dev)
5923 {
5924 struct drm_i915_private *dev_priv = dev->dev_private;
5925 int i;
5926
5927 /* disable MST */
5928 for (i = 0; i < I915_MAX_PORTS; i++) {
5929 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5930 if (!intel_dig_port)
5931 continue;
5932
5933 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5934 if (!intel_dig_port->dp.can_mst)
5935 continue;
5936 if (intel_dig_port->dp.is_mst)
5937 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5938 }
5939 }
5940 }
5941
5942 void intel_dp_mst_resume(struct drm_device *dev)
5943 {
5944 struct drm_i915_private *dev_priv = dev->dev_private;
5945 int i;
5946
5947 for (i = 0; i < I915_MAX_PORTS; i++) {
5948 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5949 if (!intel_dig_port)
5950 continue;
5951 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5952 int ret;
5953
5954 if (!intel_dig_port->dp.can_mst)
5955 continue;
5956
5957 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5958 if (ret != 0)
5959 intel_dp_check_mst_status(&intel_dig_port->dp);
5961 }
5962 }
5963 }