drm: add register and unregister functions for connectors
drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

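/* Fixed DPLL divider settings for each DP link rate, per platform. */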
struct dp_link_dpll {
        int link_bw;
        struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 sinks, which can have additional link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming a fractional division for m2.
         * m2 is stored in fixed-point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);

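/*
 * Highest link rate (as a DP_LINK_BW_* code) usable on this connection:
 * the sink's advertised maximum, clamped to what the source can drive.
 */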
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
        struct drm_device *dev = intel_dp->attached_connector->base.dev;

        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
                break;
        case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
                if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
                     INTEL_INFO(dev)->gen >= 8) &&
                    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
                        max_link_bw = DP_LINK_BW_5_4;
                else
                        max_link_bw = DP_LINK_BW_2_7;
                break;
        default:
                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
                     max_link_bw);
                max_link_bw = DP_LINK_BW_1_62;
                break;
        }
        return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        u8 source_max, sink_max;

        source_max = 4;
        if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
            (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
                source_max = 2;

        sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

        return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        return (max_link_clock * max_lanes * 8) / 10;
}

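/*
 * Filter out modes the sink or panel can't display: larger than the eDP
 * fixed mode, needing more bandwidth than the link offers at 18bpp, or
 * with out-of-range or double-clocked pixel clocks.
 */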
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

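/*
 * Each AUX channel data register holds up to four message bytes in
 * big-endian order; pack_aux()/unpack_aux() convert between that layout
 * and a plain byte buffer.
 */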
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
        int i;
        uint32_t v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((uint32_t) src[i]) << ((3-i) * 8);
        return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;
        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;

        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
        if (IS_VALLEYVIEW(dev))
                return 200;

        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100;
        case CLKCFG_FSB_533:
                return 133;
        case CLKCFG_FSB_667:
                return 166;
        case CLKCFG_FSB_800:
                return 200;
        case CLKCFG_FSB_1067:
                return 266;
        case CLKCFG_FSB_1333:
                return 333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400;
        default:
                return 133;
        }
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp,
                                    struct edp_power_seq *out);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp,
                                              struct edp_power_seq *out);

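/*
 * On VLV the panel power sequencer is per-pipe: return the pipe currently
 * driving this port, or scan the PPS port-select bits at init time and
 * fall back to pipe A if nothing matches.
 */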
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        enum pipe pipe;

        /* modeset should have pipe */
        if (crtc)
                return to_intel_crtc(crtc)->pipe;

        /* init time, try to find a pipe with this port selected */
        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;
                if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
                        return pipe;
                if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
                        return pipe;
        }

        /* shrug */
        return PIPE_A;
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (HAS_PCH_SPLIT(dev))
                return PCH_PP_CONTROL;
        else
                return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (HAS_PCH_SPLIT(dev))
                return PCH_PP_STATUS;
        else
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;

        power_domain = intel_display_port_power_domain(intel_encoder);
        return intel_display_power_enabled(dev_priv, power_domain) &&
               (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}

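/*
 * Wait for a pending AUX transaction to finish, either via the AUX done
 * interrupt or by polling the SEND_BUSY bit, and return the final channel
 * status.
 */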
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;

        /*
         * The clock divider is based off the hrawclk, and would like to run at
         * 2MHz.  So, take the hrawclk value and divide by 2 and use that
         */
        return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;

        if (index)
                return 0;

        if (intel_dig_port->port == PORT_A) {
                if (IS_GEN6(dev) || IS_GEN7(dev))
                        return 200; /* SNB & IVB eDP input clock at 400MHz */
                else
                        return 225; /* eDP input clock at 450MHz */
        } else {
                return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
                return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        } else {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        return index ? 0 : 100;
}

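/*
 * Assemble the AUX_CH_CTL value for one transaction: the send/done/error
 * status bits plus the timeout, message size, precharge time and 2x
 * bit-clock divider fields.
 */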
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

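/*
 * Perform one raw AUX transaction: force VDD on for eDP, wait for the
 * channel to go idle, load the request into the data registers, kick off
 * the send (retrying as the DP spec requires) and unpack any reply.
 * Returns the number of bytes received or a negative error code.
 */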
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t ch_data = ch_ctl + 4;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        vdd = _edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        intel_aux_display_runtime_get(dev_priv);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                WARN(1, "dp_aux_ch not started status 0x%08x\n",
                     I915_READ(ch_ctl));
                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(ch_data + i,
                                           pack_aux(send + i, send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                      DP_AUX_CH_CTL_RECEIVE_ERROR))
                                continue;
                        if (status & DP_AUX_CH_CTL_DONE)
                                break;
                }
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                unpack_aux(I915_READ(ch_data + i),
                           recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
        intel_aux_display_runtime_put(dev_priv);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
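/*
 * drm_dp_aux transfer hook: build the 3-4 byte AUX header from the
 * drm_dp_aux_msg, hand the request to intel_dp_aux_ch() and translate
 * the reply back into the message.
 */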
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        txbuf[0] = msg->request << 4;
        txbuf[1] = msg->address >> 8;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 1;

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;

                        /* Return payload size. */
                        ret = msg->size;
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        const char *name = NULL;
        int ret;

        switch (port) {
        case PORT_A:
                intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
                name = "DPDDC-A";
                break;
        case PORT_B:
                intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
                name = "DPDDC-B";
                break;
        case PORT_C:
                intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
                name = "DPDDC-C";
                break;
        case PORT_D:
                intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
                name = "DPDDC-D";
                break;
        default:
                BUG();
        }

        if (!HAS_DDI(dev))
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

        intel_dp->aux.name = name;
        intel_dp->aux.dev = dev->dev;
        intel_dp->aux.transfer = intel_dp_aux_transfer;

        DRM_DEBUG_KMS("registering %s bus for %s\n", name,
                      connector->base.kdev->kobj.name);

        ret = drm_dp_aux_register(&intel_dp->aux);
        if (ret < 0) {
                DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
                          name, ret);
                return;
        }

        ret = sysfs_create_link(&connector->base.kdev->kobj,
                                &intel_dp->aux.ddc.dev.kobj,
                                intel_dp->aux.ddc.dev.kobj.name);
        if (ret < 0) {
                DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
                drm_dp_aux_unregister(&intel_dp->aux);
        }
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

        sysfs_remove_link(&intel_connector->base.kdev->kobj,
                          intel_dp->aux.ddc.dev.kobj.name);
        intel_connector_unregister(intel_connector);
}

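/*
 * On platforms with fixed DP PLL dividers, look up the divider set for
 * the requested link rate and store it in the pipe config.
 */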
static void
intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_config *pipe_config, int link_bw)
{
        struct drm_device *dev = encoder->base.dev;
        const struct dp_link_dpll *divisor = NULL;
        int i, count = 0;

        if (IS_G4X(dev)) {
                divisor = gen4_dpll;
                count = ARRAY_SIZE(gen4_dpll);
        } else if (IS_HASWELL(dev)) {
                /* Haswell has special-purpose DP DDI clocks. */
        } else if (HAS_PCH_SPLIT(dev)) {
                divisor = pch_dpll;
                count = ARRAY_SIZE(pch_dpll);
        } else if (IS_CHERRYVIEW(dev)) {
                divisor = chv_dpll;
                count = ARRAY_SIZE(chv_dpll);
        } else if (IS_VALLEYVIEW(dev)) {
                divisor = vlv_dpll;
                count = ARRAY_SIZE(vlv_dpll);
        }

        if (divisor && count) {
                for (i = 0; i < count; i++) {
                        if (link_bw == divisor[i].link_bw) {
                                pipe_config->dpll = divisor[i].dpll;
                                pipe_config->clock_set = true;
                                break;
                        }
                }
        }
}

static void
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum transcoder transcoder = crtc->config.cpu_transcoder;

        I915_WRITE(PIPE_DATA_M2(transcoder),
                   TU_SIZE(m_n->tu) | m_n->gmch_m);
        I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
        I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
        I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}

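/*
 * Choose link parameters for the mode: walk bpp down from the pipe's
 * value (clamped by VBT limits on eDP), and for each bpp try every
 * allowed lane count and link rate until the required bandwidth fits.
 */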
bool
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *intel_crtc = encoder->new_crtc;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int min_lane_count = 1;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift...*/
        int min_clock = 0;
        int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
        static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
        int link_avail, link_clock;

        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                pipe_config->has_pch_encoder = true;

        pipe_config->has_dp_encoder = true;
        pipe_config->has_audio = intel_dp->has_audio;

        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);
                if (!HAS_PCH_SPLIT(dev))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 intel_connector->panel.fitting_mode);
                else
                        intel_pch_panel_fitting(intel_crtc, pipe_config,
                                                intel_connector->panel.fitting_mode);
        }

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return false;

        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock],
                      adjusted_mode->crtc_clock);

        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
        if (is_edp(intel_dp)) {
                if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                                      dev_priv->vbt.edp_bpp);
                        bpp = dev_priv->vbt.edp_bpp;
                }

                if (IS_BROADWELL(dev)) {
                        /* Yes, it's an ugly hack. */
                        min_lane_count = max_lane_count;
                        DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
                                      min_lane_count);
                } else if (dev_priv->vbt.edp_lanes) {
                        min_lane_count = min(dev_priv->vbt.edp_lanes,
                                             max_lane_count);
                        DRM_DEBUG_KMS("using min %u lanes per VBT\n",
                                      min_lane_count);
                }

                if (dev_priv->vbt.edp_rate) {
                        min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
                        DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
                                      bws[min_clock]);
                }
        }

        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);

                for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
                        for (clock = min_clock; clock <= max_clock; clock++) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);

                                if (mode_rate <= link_avail) {
                                        goto found;
                                }
                        }
                }
        }

        return false;

found:
        if (intel_dp->color_range_auto) {
                /*
                 * See:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
                if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
                        intel_dp->color_range = DP_COLOR_RANGE_16_235;
                else
                        intel_dp->color_range = 0;
        }

        if (intel_dp->color_range)
                pipe_config->limited_color_range = true;

        intel_dp->link_bw = bws[clock];
        intel_dp->lane_count = lane_count;
        pipe_config->pipe_bpp = bpp;
        pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
                      intel_dp->link_bw, intel_dp->lane_count,
                      pipe_config->port_clock, bpp);
        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
                      mode_rate, link_avail);

        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n);

        if (intel_connector->panel.downclock_mode != NULL &&
            intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
                intel_link_compute_m_n(bpp, lane_count,
                                       intel_connector->panel.downclock_mode->clock,
                                       pipe_config->port_clock,
                                       &pipe_config->dp_m2_n2);
        }

        intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

        return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (crtc->config.port_clock == 162000) {
                /* For a long time we've carried around an ILK-DevA w/a for the
                 * 160MHz clock. If we're really unlucky, it's still required.
                 */
                DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        }

        I915_WRITE(DP_A, dpa_ctl);

        POSTING_READ(DP_A);
        udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

        if (crtc->config.has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
                intel_write_eld(&encoder->base, adjusted_mode);
        }

        /* Split out the IBX/CPU vs CPT settings */

        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= crtc->pipe << 29;
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
                        intel_dp->DP |= intel_dp->color_range;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (!IS_CHERRYVIEW(dev)) {
                        if (crtc->pipe == 1)
                                intel_dp->DP |= DP_PIPEB_SELECT;
                } else {
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                }
        } else {
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        }
}

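/*
 * Panel power sequencer status masks: the PP_STATUS bits to wait on for
 * the panel to be fully on, fully off, or idle after a power cycle.
 */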
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0 | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void wait_panel_status(struct intel_dp *intel_dp,
                              u32 mask,
                              u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                      mask, value,
                      I915_READ(pp_stat_reg),
                      I915_READ(pp_ctrl_reg));

        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                          I915_READ(pp_stat_reg),
                          I915_READ(pp_ctrl_reg));
        }

        DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 control;

        control = I915_READ(_pp_ctrl_reg(intel_dp));
        control &= ~PANEL_UNLOCK_MASK;
        control |= PANEL_UNLOCK_REGS;
        return control;
}

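/*
 * Force the panel VDD rail on so AUX/DDC transactions work even with the
 * panel nominally off. Returns true if VDD was not already requested, in
 * which case the caller must balance this with a VDD off.
 */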
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        bool need_to_disable = !intel_dp->want_panel_vdd;

        if (!is_edp(intel_dp))
                return false;

        intel_dp->want_panel_vdd = true;

        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP VDD on\n");

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP was not running\n");
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}

void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        if (is_edp(intel_dp)) {
                bool vdd = _edp_panel_vdd_on(intel_dp);

                WARN(!vdd, "eDP VDD already requested on\n");
        }
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

        if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
                struct intel_digital_port *intel_dig_port =
                        dp_to_dig_port(intel_dp);
                struct intel_encoder *intel_encoder = &intel_dig_port->base;
                enum intel_display_power_domain power_domain;

                DRM_DEBUG_KMS("Turning eDP VDD off\n");

                pp = ironlake_get_pp_control(intel_dp);
                pp &= ~EDP_FORCE_VDD;

                pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
                pp_stat_reg = _pp_stat_reg(intel_dp);

                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);

                /* Make sure sequencer is idle before allowing subsequent activity */
                DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                              I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

                if ((pp & POWER_TARGET_ON) == 0)
                        intel_dp->last_power_cycle = jiffies;

                power_domain = intel_display_port_power_domain(intel_encoder);
                intel_display_power_put(dev_priv, power_domain);
        }
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        edp_panel_vdd_off_sync(intel_dp);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        if (!is_edp(intel_dp))
                return;

        WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

        intel_dp->want_panel_vdd = false;

        if (sync) {
                edp_panel_vdd_off_sync(intel_dp);
        } else {
                /*
                 * Queue the timer to fire a long
                 * time from now (relative to the power down delay)
                 * to keep the panel power up across a sequence of operations
                 */
                schedule_delayed_work(&intel_dp->panel_vdd_work,
                                      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
        }
}

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power on\n");

        if (edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
                return;
        }

        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power off\n");

        edp_wait_backlight_off(intel_dp);

        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}

void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");
        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        intel_panel_enable_backlight(intel_dp->attached_connector);
}

void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        intel_panel_disable_backlight(intel_dp->attached_connector);

        DRM_DEBUG_KMS("\n");
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        intel_dp->last_backlight_off = jiffies;
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail). */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
                if (ret != 1)
                        DRM_DEBUG_DRIVER("failed to write sink power state\n");
        } else {
                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }
}

static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        return true;
                }

                for_each_pipe(i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}

static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_config *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->adjusted_mode.flags |= flags;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}

static bool is_edp_psr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return dev_priv->psr.sink_support;
}

static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_PSR(dev))
                return false;

        return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}

static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
                                    struct edp_vsc_psr *vsc_psr)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
        u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
        uint32_t *data = (uint32_t *) vsc_psr;
        unsigned int i;

        /* As per BSpec (Pipe Video Data Island Packet), we need to disable
         * the video DIP before programming the video DIP data buffer
         * registers, and re-enable it only afterwards. */
        I915_WRITE(ctl_reg, 0);
        POSTING_READ(ctl_reg);

        for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
                if (i < sizeof(struct edp_vsc_psr))
                        I915_WRITE(data_reg + i, *data++);
                else
                        I915_WRITE(data_reg + i, 0);
        }

        I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
        POSTING_READ(ctl_reg);
}

static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_vsc_psr psr_vsc;

        if (intel_dp->psr_setup_done)
                return;

        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
        psr_vsc.sdp_header.HB1 = 0x7;
        psr_vsc.sdp_header.HB2 = 0x2;
        psr_vsc.sdp_header.HB3 = 0x8;
        intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

        /* Avoid continuous PSR exit by masking memup and hpd */
        I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
                   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

        intel_dp->psr_setup_done = true;
}

static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        int precharge = 0x3;
        int msg_size = 5;       /* Header(4) + Message(1) */

        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

        /* Enable PSR in sink */
        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
        else
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

        /* Setup AUX registers */
        I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
        I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
        I915_WRITE(EDP_PSR_AUX_CTL(dev),
                   DP_AUX_CH_CTL_TIME_OUT_400us |
                   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}

static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t max_sleep_time = 0x1f;
        uint32_t idle_frames = 1;
        uint32_t val = 0x0;
        const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
                val |= EDP_PSR_LINK_STANDBY;
                val |= EDP_PSR_TP2_TP3_TIME_0us;
                val |= EDP_PSR_TP1_TIME_0us;
                val |= EDP_PSR_SKIP_AUX_EXIT;
        } else
                val |= EDP_PSR_LINK_DISABLE;

        I915_WRITE(EDP_PSR_CTL(dev), val |
                   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
                   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
                   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
                   EDP_PSR_ENABLE);
}

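/*
 * Check every source-side PSR constraint: HSW DDI A eDP only, module
 * parameter enabled, an active CRTC, an X-tiled and fenced framebuffer,
 * no sprite planes, no stereo 3D and no interlaced modes.
 */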
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;

        dev_priv->psr.source_ok = false;

        if (!HAS_PSR(dev)) {
                DRM_DEBUG_KMS("PSR not supported on this platform\n");
                return false;
        }

        if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
            (dig_port->port != PORT_A)) {
                DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
                return false;
        }

        if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disabled by flag\n");
                return false;
        }
1765
1766 crtc = dig_port->base.base.crtc;
1767 if (crtc == NULL) {
1768 DRM_DEBUG_KMS("crtc not active for PSR\n");
1769 return false;
1770 }
1771
1772 intel_crtc = to_intel_crtc(crtc);
1773 if (!intel_crtc_active(crtc)) {
1774 DRM_DEBUG_KMS("crtc not active for PSR\n");
1775 return false;
1776 }
1777
1778 obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1779 if (obj->tiling_mode != I915_TILING_X ||
1780 obj->fence_reg == I915_FENCE_REG_NONE) {
1781 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1782 return false;
1783 }
1784
1785 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1786 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1787 return false;
1788 }
1789
1790 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1791 S3D_ENABLE) {
1792 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1793 return false;
1794 }
1795
1796 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1797 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1798 return false;
1799 }
1800
1801 dev_priv->psr.source_ok = true;
1802 return true;
1803 }
1804
1805 static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1806 {
1807 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1808
1809 if (!intel_edp_psr_match_conditions(intel_dp) ||
1810 intel_edp_is_psr_enabled(dev))
1811 return;
1812
1813 /* Setup PSR once */
1814 intel_edp_psr_setup(intel_dp);
1815
1816 /* Enable PSR on the panel */
1817 intel_edp_psr_enable_sink(intel_dp);
1818
1819 /* Enable PSR on the host */
1820 intel_edp_psr_enable_source(intel_dp);
1821 }
1822
1823 void intel_edp_psr_enable(struct intel_dp *intel_dp)
1824 {
1825 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1826
1827 if (intel_edp_psr_match_conditions(intel_dp) &&
1828 !intel_edp_is_psr_enabled(dev))
1829 intel_edp_psr_do_enable(intel_dp);
1830 }
1831
1832 void intel_edp_psr_disable(struct intel_dp *intel_dp)
1833 {
1834 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1835 struct drm_i915_private *dev_priv = dev->dev_private;
1836
1837 if (!intel_edp_is_psr_enabled(dev))
1838 return;
1839
1840 I915_WRITE(EDP_PSR_CTL(dev),
1841 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1842
1843 /* Wait till PSR is idle */
1844 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1845 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1846 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1847 }
1848
1849 void intel_edp_psr_update(struct drm_device *dev)
1850 {
1851 struct intel_encoder *encoder;
1852 struct intel_dp *intel_dp = NULL;
1853
1854 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
1855 if (encoder->type == INTEL_OUTPUT_EDP) {
1856 intel_dp = enc_to_intel_dp(&encoder->base);
1857
1858 if (!is_edp_psr(dev))
1859 return;
1860
1861 if (!intel_edp_psr_match_conditions(intel_dp))
1862 intel_edp_psr_disable(intel_dp);
1863 else
1864 if (!intel_edp_is_psr_enabled(dev))
1865 intel_edp_psr_do_enable(intel_dp);
1866 }
1867 }
1868
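/*
 * Disable sequence: grab a VDD reference so AUX keeps working while the
 * panel goes down, kill the backlight, put the sink to sleep and power
 * the panel off. For CPU port A and VLV the link itself is only taken
 * down in the post-disable hook, after the pipe/plane are off.
 */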
1869 static void intel_disable_dp(struct intel_encoder *encoder)
1870 {
1871 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1872 enum port port = dp_to_dig_port(intel_dp)->port;
1873 struct drm_device *dev = encoder->base.dev;
1874
1875 /* Make sure the panel is off before trying to change the mode. But also
1876 * ensure that we have vdd while we switch off the panel. */
1877 intel_edp_panel_vdd_on(intel_dp);
1878 intel_edp_backlight_off(intel_dp);
1879 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1880 intel_edp_panel_off(intel_dp);
1881
1882 /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
1883 if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
1884 intel_dp_link_down(intel_dp);
1885 }
1886
1887 static void g4x_post_disable_dp(struct intel_encoder *encoder)
1888 {
1889 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1890 enum port port = dp_to_dig_port(intel_dp)->port;
1891
1892 if (port != PORT_A)
1893 return;
1894
1895 intel_dp_link_down(intel_dp);
1896 ironlake_edp_pll_off(intel_dp);
1897 }
1898
1899 static void vlv_post_disable_dp(struct intel_encoder *encoder)
1900 {
1901 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1902
1903 intel_dp_link_down(intel_dp);
1904 }
1905
1906 static void chv_post_disable_dp(struct intel_encoder *encoder)
1907 {
1908 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1909 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1910 struct drm_device *dev = encoder->base.dev;
1911 struct drm_i915_private *dev_priv = dev->dev_private;
1912 struct intel_crtc *intel_crtc =
1913 to_intel_crtc(encoder->base.crtc);
1914 enum dpio_channel ch = vlv_dport_to_channel(dport);
1915 enum pipe pipe = intel_crtc->pipe;
1916 u32 val;
1917
1918 intel_dp_link_down(intel_dp);
1919
1920 mutex_lock(&dev_priv->dpio_lock);
1921
1922 /* Propagate soft reset to data lane reset */
1923 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1924 val |= CHV_PCS_REQ_SOFTRESET_EN;
1925 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1926
1927 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1928 val |= CHV_PCS_REQ_SOFTRESET_EN;
1929 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1930
1931 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1932 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1933 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1934
1935 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1936 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1937 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1938
1939 mutex_unlock(&dev_priv->dpio_lock);
1940 }
1941
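/*
 * Common enable path: with VDD forced on, wake the sink, start link
 * training, switch the panel over to its own power, then finish and
 * stop training. The WARN catches a port that is already enabled.
 */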
1942 static void intel_enable_dp(struct intel_encoder *encoder)
1943 {
1944 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1945 struct drm_device *dev = encoder->base.dev;
1946 struct drm_i915_private *dev_priv = dev->dev_private;
1947 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1948
1949 if (WARN_ON(dp_reg & DP_PORT_EN))
1950 return;
1951
1952 intel_edp_panel_vdd_on(intel_dp);
1953 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1954 intel_dp_start_link_train(intel_dp);
1955 intel_edp_panel_on(intel_dp);
1956 edp_panel_vdd_off(intel_dp, true);
1957 intel_dp_complete_link_train(intel_dp);
1958 intel_dp_stop_link_train(intel_dp);
1959 }
1960
1961 static void g4x_enable_dp(struct intel_encoder *encoder)
1962 {
1963 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1964
1965 intel_enable_dp(encoder);
1966 intel_edp_backlight_on(intel_dp);
1967 }
1968
1969 static void vlv_enable_dp(struct intel_encoder *encoder)
1970 {
1971 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1972
1973 intel_edp_backlight_on(intel_dp);
1974 }
1975
1976 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
1977 {
1978 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1979 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1980
1981 intel_dp_prepare(encoder);
1982
1983 /* Only ilk+ has port A */
1984 if (dport->port == PORT_A) {
1985 ironlake_set_pll_cpu_edp(intel_dp);
1986 ironlake_edp_pll_on(intel_dp);
1987 }
1988 }
1989
1990 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
1991 {
1992 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1993 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1994 struct drm_device *dev = encoder->base.dev;
1995 struct drm_i915_private *dev_priv = dev->dev_private;
1996 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1997 enum dpio_channel port = vlv_dport_to_channel(dport);
1998 int pipe = intel_crtc->pipe;
1999 struct edp_power_seq power_seq;
2000 u32 val;
2001
2002 mutex_lock(&dev_priv->dpio_lock);
2003
2005 val = 0;
2006 if (pipe)
2007 val |= (1<<21);
2010 val |= 0x001000c4;
2011 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2012 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2013 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2014
2015 mutex_unlock(&dev_priv->dpio_lock);
2016
2017 if (is_edp(intel_dp)) {
2018 /* init power sequencer on this pipe and port */
2019 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2020 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2021 &power_seq);
2022 }
2023
2024 intel_enable_dp(encoder);
2025
2026 vlv_wait_port_ready(dev_priv, dport);
2027 }
2028
2029 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2030 {
2031 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2032 struct drm_device *dev = encoder->base.dev;
2033 struct drm_i915_private *dev_priv = dev->dev_private;
2034 struct intel_crtc *intel_crtc =
2035 to_intel_crtc(encoder->base.crtc);
2036 enum dpio_channel port = vlv_dport_to_channel(dport);
2037 int pipe = intel_crtc->pipe;
2038
2039 intel_dp_prepare(encoder);
2040
2041 /* Program Tx lane resets to default */
2042 mutex_lock(&dev_priv->dpio_lock);
2043 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2044 DPIO_PCS_TX_LANE2_RESET |
2045 DPIO_PCS_TX_LANE1_RESET);
2046 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2047 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2048 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2049 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2050 DPIO_PCS_CLK_SOFT_RESET);
2051
2052 /* Fix up inter-pair skew failure */
2053 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2054 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2055 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2056 mutex_unlock(&dev_priv->dpio_lock);
2057 }
2058
2059 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2060 {
2061 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2062 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2063 struct drm_device *dev = encoder->base.dev;
2064 struct drm_i915_private *dev_priv = dev->dev_private;
2065 struct edp_power_seq power_seq;
2066 struct intel_crtc *intel_crtc =
2067 to_intel_crtc(encoder->base.crtc);
2068 enum dpio_channel ch = vlv_dport_to_channel(dport);
2069 int pipe = intel_crtc->pipe;
2070 int data, i;
2071 u32 val;
2072
2073 mutex_lock(&dev_priv->dpio_lock);
2074
2075 /* Deassert soft data lane reset */
2076 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2077 val |= CHV_PCS_REQ_SOFTRESET_EN;
2078 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2079
2080 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2081 val |= CHV_PCS_REQ_SOFTRESET_EN;
2082 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2083
2084 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2085 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2086 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2087
2088 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2089 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2090 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2091
2092 /* Program Tx lane latency optimal setting */
2093 for (i = 0; i < 4; i++) {
2094 /* Set the latency optimal bit */
2095 data = (i == 1) ? 0x0 : 0x6;
2096 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2097 data << DPIO_FRC_LATENCY_SHFIT);
2098
2099 /* Set the upar bit */
2100 data = (i == 1) ? 0x0 : 0x1;
2101 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2102 data << DPIO_UPAR_SHIFT);
2103 }
2104
2105 /* Data lane stagger programming */
2106 /* FIXME: Fix up value only after power analysis */
2107
2108 mutex_unlock(&dev_priv->dpio_lock);
2109
2110 if (is_edp(intel_dp)) {
2111 /* init power sequencer on this pipe and port */
2112 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2113 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2114 &power_seq);
2115 }
2116
2117 intel_enable_dp(encoder);
2118
2119 vlv_wait_port_ready(dev_priv, dport);
2120 }
2121
2122 /*
2123 * Native read with retry for link status and receiver capability reads for
2124 * cases where the sink may still be asleep.
2125 *
2126 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2127 * supposed to retry 3 times per the spec.
2128 */
2129 static ssize_t
2130 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2131 void *buffer, size_t size)
2132 {
2133 ssize_t ret;
2134 int i;
2135
2136 for (i = 0; i < 3; i++) {
2137 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2138 if (ret == size)
2139 return ret;
2140 msleep(1);
2141 }
2142
2143 return ret;
2144 }
2145
2146 /*
2147 * Fetch AUX CH registers 0x202 - 0x207 which contain
2148 * link status information
2149 */
2150 static bool
2151 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2152 {
2153 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2154 DP_LANE0_1_STATUS,
2155 link_status,
2156 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2157 }
2158
2159 /*
2160 * These are source-specific values; the maximum voltage swing and
2161 * pre-emphasis are platform-dependent, as the tables below show
2162 * (up to 1200mV and 9.5dB on some platforms).
2163 */
2163
2164 static uint8_t
2165 intel_dp_voltage_max(struct intel_dp *intel_dp)
2166 {
2167 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2168 enum port port = dp_to_dig_port(intel_dp)->port;
2169
2170 if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
2171 return DP_TRAIN_VOLTAGE_SWING_1200;
2172 else if (IS_GEN7(dev) && port == PORT_A)
2173 return DP_TRAIN_VOLTAGE_SWING_800;
2174 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2175 return DP_TRAIN_VOLTAGE_SWING_1200;
2176 else
2177 return DP_TRAIN_VOLTAGE_SWING_800;
2178 }
2179
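/*
 * Highest pre-emphasis the source can pair with a given voltage swing;
 * like the voltage limit above, this is a per-platform property.
 */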
2180 static uint8_t
2181 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2182 {
2183 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2184 enum port port = dp_to_dig_port(intel_dp)->port;
2185
2186 if (IS_BROADWELL(dev)) {
2187 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2188 case DP_TRAIN_VOLTAGE_SWING_400:
2189 case DP_TRAIN_VOLTAGE_SWING_600:
2190 return DP_TRAIN_PRE_EMPHASIS_6;
2191 case DP_TRAIN_VOLTAGE_SWING_800:
2192 return DP_TRAIN_PRE_EMPHASIS_3_5;
2193 case DP_TRAIN_VOLTAGE_SWING_1200:
2194 default:
2195 return DP_TRAIN_PRE_EMPHASIS_0;
2196 }
2197 } else if (IS_HASWELL(dev)) {
2198 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2199 case DP_TRAIN_VOLTAGE_SWING_400:
2200 return DP_TRAIN_PRE_EMPHASIS_9_5;
2201 case DP_TRAIN_VOLTAGE_SWING_600:
2202 return DP_TRAIN_PRE_EMPHASIS_6;
2203 case DP_TRAIN_VOLTAGE_SWING_800:
2204 return DP_TRAIN_PRE_EMPHASIS_3_5;
2205 case DP_TRAIN_VOLTAGE_SWING_1200:
2206 default:
2207 return DP_TRAIN_PRE_EMPHASIS_0;
2208 }
2209 } else if (IS_VALLEYVIEW(dev)) {
2210 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2211 case DP_TRAIN_VOLTAGE_SWING_400:
2212 return DP_TRAIN_PRE_EMPHASIS_9_5;
2213 case DP_TRAIN_VOLTAGE_SWING_600:
2214 return DP_TRAIN_PRE_EMPHASIS_6;
2215 case DP_TRAIN_VOLTAGE_SWING_800:
2216 return DP_TRAIN_PRE_EMPHASIS_3_5;
2217 case DP_TRAIN_VOLTAGE_SWING_1200:
2218 default:
2219 return DP_TRAIN_PRE_EMPHASIS_0;
2220 }
2221 } else if (IS_GEN7(dev) && port == PORT_A) {
2222 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2223 case DP_TRAIN_VOLTAGE_SWING_400:
2224 return DP_TRAIN_PRE_EMPHASIS_6;
2225 case DP_TRAIN_VOLTAGE_SWING_600:
2226 case DP_TRAIN_VOLTAGE_SWING_800:
2227 return DP_TRAIN_PRE_EMPHASIS_3_5;
2228 default:
2229 return DP_TRAIN_PRE_EMPHASIS_0;
2230 }
2231 } else {
2232 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2233 case DP_TRAIN_VOLTAGE_SWING_400:
2234 return DP_TRAIN_PRE_EMPHASIS_6;
2235 case DP_TRAIN_VOLTAGE_SWING_600:
2236 return DP_TRAIN_PRE_EMPHASIS_6;
2237 case DP_TRAIN_VOLTAGE_SWING_800:
2238 return DP_TRAIN_PRE_EMPHASIS_3_5;
2239 case DP_TRAIN_VOLTAGE_SWING_1200:
2240 default:
2241 return DP_TRAIN_PRE_EMPHASIS_0;
2242 }
2243 }
2244 }
2245
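/*
 * VLV: translate the requested swing/pre-emphasis into DPIO PHY values
 * and program them over the sideband interface. Nothing lands in the
 * DP port register itself, hence the return value of 0 (the caller
 * uses an empty mask).
 */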
2246 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2247 {
2248 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2249 struct drm_i915_private *dev_priv = dev->dev_private;
2250 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2251 struct intel_crtc *intel_crtc =
2252 to_intel_crtc(dport->base.base.crtc);
2253 unsigned long demph_reg_value, preemph_reg_value,
2254 uniqtranscale_reg_value;
2255 uint8_t train_set = intel_dp->train_set[0];
2256 enum dpio_channel port = vlv_dport_to_channel(dport);
2257 int pipe = intel_crtc->pipe;
2258
2259 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2260 case DP_TRAIN_PRE_EMPHASIS_0:
2261 preemph_reg_value = 0x0004000;
2262 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2263 case DP_TRAIN_VOLTAGE_SWING_400:
2264 demph_reg_value = 0x2B405555;
2265 uniqtranscale_reg_value = 0x552AB83A;
2266 break;
2267 case DP_TRAIN_VOLTAGE_SWING_600:
2268 demph_reg_value = 0x2B404040;
2269 uniqtranscale_reg_value = 0x5548B83A;
2270 break;
2271 case DP_TRAIN_VOLTAGE_SWING_800:
2272 demph_reg_value = 0x2B245555;
2273 uniqtranscale_reg_value = 0x5560B83A;
2274 break;
2275 case DP_TRAIN_VOLTAGE_SWING_1200:
2276 demph_reg_value = 0x2B405555;
2277 uniqtranscale_reg_value = 0x5598DA3A;
2278 break;
2279 default:
2280 return 0;
2281 }
2282 break;
2283 case DP_TRAIN_PRE_EMPHASIS_3_5:
2284 preemph_reg_value = 0x0002000;
2285 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2286 case DP_TRAIN_VOLTAGE_SWING_400:
2287 demph_reg_value = 0x2B404040;
2288 uniqtranscale_reg_value = 0x5552B83A;
2289 break;
2290 case DP_TRAIN_VOLTAGE_SWING_600:
2291 demph_reg_value = 0x2B404848;
2292 uniqtranscale_reg_value = 0x5580B83A;
2293 break;
2294 case DP_TRAIN_VOLTAGE_SWING_800:
2295 demph_reg_value = 0x2B404040;
2296 uniqtranscale_reg_value = 0x55ADDA3A;
2297 break;
2298 default:
2299 return 0;
2300 }
2301 break;
2302 case DP_TRAIN_PRE_EMPHASIS_6:
2303 preemph_reg_value = 0x0000000;
2304 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2305 case DP_TRAIN_VOLTAGE_SWING_400:
2306 demph_reg_value = 0x2B305555;
2307 uniqtranscale_reg_value = 0x5570B83A;
2308 break;
2309 case DP_TRAIN_VOLTAGE_SWING_600:
2310 demph_reg_value = 0x2B2B4040;
2311 uniqtranscale_reg_value = 0x55ADDA3A;
2312 break;
2313 default:
2314 return 0;
2315 }
2316 break;
2317 case DP_TRAIN_PRE_EMPHASIS_9_5:
2318 preemph_reg_value = 0x0006000;
2319 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2320 case DP_TRAIN_VOLTAGE_SWING_400:
2321 demph_reg_value = 0x1B405555;
2322 uniqtranscale_reg_value = 0x55ADDA3A;
2323 break;
2324 default:
2325 return 0;
2326 }
2327 break;
2328 default:
2329 return 0;
2330 }
2331
2332 mutex_lock(&dev_priv->dpio_lock);
2333 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2334 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2335 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
2336 uniqtranscale_reg_value);
2337 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2338 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2339 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2340 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
2341 mutex_unlock(&dev_priv->dpio_lock);
2342
2343 return 0;
2344 }
2345
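/*
 * CHV counterpart of the VLV routine above: pick de-emphasis and
 * swing-margin values, run the PHY swing calculation, and again return
 * 0 since the port register is not involved.
 */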
2346 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2347 {
2348 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2349 struct drm_i915_private *dev_priv = dev->dev_private;
2350 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2351 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
2352 u32 deemph_reg_value, margin_reg_value, val;
2353 uint8_t train_set = intel_dp->train_set[0];
2354 enum dpio_channel ch = vlv_dport_to_channel(dport);
2355 enum pipe pipe = intel_crtc->pipe;
2356 int i;
2357
2358 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2359 case DP_TRAIN_PRE_EMPHASIS_0:
2360 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2361 case DP_TRAIN_VOLTAGE_SWING_400:
2362 deemph_reg_value = 128;
2363 margin_reg_value = 52;
2364 break;
2365 case DP_TRAIN_VOLTAGE_SWING_600:
2366 deemph_reg_value = 128;
2367 margin_reg_value = 77;
2368 break;
2369 case DP_TRAIN_VOLTAGE_SWING_800:
2370 deemph_reg_value = 128;
2371 margin_reg_value = 102;
2372 break;
2373 case DP_TRAIN_VOLTAGE_SWING_1200:
2374 deemph_reg_value = 128;
2375 margin_reg_value = 154;
2376 /* FIXME extra to set for 1200 */
2377 break;
2378 default:
2379 return 0;
2380 }
2381 break;
2382 case DP_TRAIN_PRE_EMPHASIS_3_5:
2383 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2384 case DP_TRAIN_VOLTAGE_SWING_400:
2385 deemph_reg_value = 85;
2386 margin_reg_value = 78;
2387 break;
2388 case DP_TRAIN_VOLTAGE_SWING_600:
2389 deemph_reg_value = 85;
2390 margin_reg_value = 116;
2391 break;
2392 case DP_TRAIN_VOLTAGE_SWING_800:
2393 deemph_reg_value = 85;
2394 margin_reg_value = 154;
2395 break;
2396 default:
2397 return 0;
2398 }
2399 break;
2400 case DP_TRAIN_PRE_EMPHASIS_6:
2401 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2402 case DP_TRAIN_VOLTAGE_SWING_400:
2403 deemph_reg_value = 64;
2404 margin_reg_value = 104;
2405 break;
2406 case DP_TRAIN_VOLTAGE_SWING_600:
2407 deemph_reg_value = 64;
2408 margin_reg_value = 154;
2409 break;
2410 default:
2411 return 0;
2412 }
2413 break;
2414 case DP_TRAIN_PRE_EMPHASIS_9_5:
2415 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2416 case DP_TRAIN_VOLTAGE_SWING_400:
2417 deemph_reg_value = 43;
2418 margin_reg_value = 154;
2419 break;
2420 default:
2421 return 0;
2422 }
2423 break;
2424 default:
2425 return 0;
2426 }
2427
2428 mutex_lock(&dev_priv->dpio_lock);
2429
2430 /* Clear calc init */
2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2432 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2434
2435 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2436 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2437 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2438
2439 /* Program swing deemph */
2440 for (i = 0; i < 4; i++) {
2441 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2442 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2443 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
2444 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2445 }
2446
2447 /* Program swing margin */
2448 for (i = 0; i < 4; i++) {
2449 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2450 val &= ~DPIO_SWING_MARGIN_MASK;
2451 val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
2452 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2453 }
2454
2455 /* Disable unique transition scale */
2456 for (i = 0; i < 4; i++) {
2457 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2458 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2459 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2460 }
2461
2462 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
2463 == DP_TRAIN_PRE_EMPHASIS_0) &&
2464 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
2465 == DP_TRAIN_VOLTAGE_SWING_1200)) {
2466
2467 /*
2468 * The documentation says to set bit 27 for ch0 and bit 26 for
2469 * ch1, which might be a typo in the doc.
2470 * For now, for this unique transition scale selection, set bit
2471 * 27 for both ch0 and ch1.
2472 */
2473 for (i = 0; i < 4; i++) {
2474 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2475 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
2476 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2477 }
2478
2479 for (i = 0; i < 4; i++) {
2480 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2481 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2482 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2483 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2484 }
2485 }
2486
2487 /* Start swing calculation */
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2489 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2490 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2491
2492 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2493 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2494 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2495
2496 /* LRC Bypass */
2497 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
2498 val |= DPIO_LRC_BYPASS;
2499 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
2500
2501 mutex_unlock(&dev_priv->dpio_lock);
2502
2503 return 0;
2504 }
2505
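/*
 * Fold the per-lane adjustment requests from the link status into one
 * swing/pre-emphasis pair (the highest requested), clamp it to what
 * the source supports, and set the MAX_*_REACHED flags when a ceiling
 * is hit so the sink stops asking for more.
 */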
2506 static void
2507 intel_get_adjust_train(struct intel_dp *intel_dp,
2508 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2509 {
2510 uint8_t v = 0;
2511 uint8_t p = 0;
2512 int lane;
2513 uint8_t voltage_max;
2514 uint8_t preemph_max;
2515
2516 for (lane = 0; lane < intel_dp->lane_count; lane++) {
2517 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2518 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2519
2520 if (this_v > v)
2521 v = this_v;
2522 if (this_p > p)
2523 p = this_p;
2524 }
2525
2526 voltage_max = intel_dp_voltage_max(intel_dp);
2527 if (v >= voltage_max)
2528 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2529
2530 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2531 if (p >= preemph_max)
2532 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2533
2534 for (lane = 0; lane < 4; lane++)
2535 intel_dp->train_set[lane] = v | p;
2536 }
2537
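/*
 * Gen4's DP voltage swing and pre-emphasis control; also the fallback
 * for other pre-DDI ports.
 */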
2538 static uint32_t
2539 intel_gen4_signal_levels(uint8_t train_set)
2540 {
2541 uint32_t signal_levels = 0;
2542
2543 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2544 case DP_TRAIN_VOLTAGE_SWING_400:
2545 default:
2546 signal_levels |= DP_VOLTAGE_0_4;
2547 break;
2548 case DP_TRAIN_VOLTAGE_SWING_600:
2549 signal_levels |= DP_VOLTAGE_0_6;
2550 break;
2551 case DP_TRAIN_VOLTAGE_SWING_800:
2552 signal_levels |= DP_VOLTAGE_0_8;
2553 break;
2554 case DP_TRAIN_VOLTAGE_SWING_1200:
2555 signal_levels |= DP_VOLTAGE_1_2;
2556 break;
2557 }
2558 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2559 case DP_TRAIN_PRE_EMPHASIS_0:
2560 default:
2561 signal_levels |= DP_PRE_EMPHASIS_0;
2562 break;
2563 case DP_TRAIN_PRE_EMPHASIS_3_5:
2564 signal_levels |= DP_PRE_EMPHASIS_3_5;
2565 break;
2566 case DP_TRAIN_PRE_EMPHASIS_6:
2567 signal_levels |= DP_PRE_EMPHASIS_6;
2568 break;
2569 case DP_TRAIN_PRE_EMPHASIS_9_5:
2570 signal_levels |= DP_PRE_EMPHASIS_9_5;
2571 break;
2572 }
2573 return signal_levels;
2574 }
2575
2576 /* Gen6's DP voltage swing and pre-emphasis control */
2577 static uint32_t
2578 intel_gen6_edp_signal_levels(uint8_t train_set)
2579 {
2580 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2581 DP_TRAIN_PRE_EMPHASIS_MASK);
2582 switch (signal_levels) {
2583 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2584 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2585 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2586 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2587 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2588 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2589 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2590 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2591 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2592 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2593 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2594 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2595 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2596 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2597 default:
2598 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2599 "0x%x\n", signal_levels);
2600 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2601 }
2602 }
2603
2604 /* Gen7's DP voltage swing and pre-emphasis control */
2605 static uint32_t
2606 intel_gen7_edp_signal_levels(uint8_t train_set)
2607 {
2608 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2609 DP_TRAIN_PRE_EMPHASIS_MASK);
2610 switch (signal_levels) {
2611 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2612 return EDP_LINK_TRAIN_400MV_0DB_IVB;
2613 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2614 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2615 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2616 return EDP_LINK_TRAIN_400MV_6DB_IVB;
2617
2618 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2619 return EDP_LINK_TRAIN_600MV_0DB_IVB;
2620 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2621 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2622
2623 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2624 return EDP_LINK_TRAIN_800MV_0DB_IVB;
2625 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2626 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2627
2628 default:
2629 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2630 "0x%x\n", signal_levels);
2631 return EDP_LINK_TRAIN_500MV_0DB_IVB;
2632 }
2633 }
2634
2635 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2636 static uint32_t
2637 intel_hsw_signal_levels(uint8_t train_set)
2638 {
2639 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2640 DP_TRAIN_PRE_EMPHASIS_MASK);
2641 switch (signal_levels) {
2642 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2643 return DDI_BUF_EMP_400MV_0DB_HSW;
2644 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2645 return DDI_BUF_EMP_400MV_3_5DB_HSW;
2646 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2647 return DDI_BUF_EMP_400MV_6DB_HSW;
2648 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2649 return DDI_BUF_EMP_400MV_9_5DB_HSW;
2650
2651 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2652 return DDI_BUF_EMP_600MV_0DB_HSW;
2653 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2654 return DDI_BUF_EMP_600MV_3_5DB_HSW;
2655 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2656 return DDI_BUF_EMP_600MV_6DB_HSW;
2657
2658 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2659 return DDI_BUF_EMP_800MV_0DB_HSW;
2660 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2661 return DDI_BUF_EMP_800MV_3_5DB_HSW;
2662 default:
2663 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2664 "0x%x\n", signal_levels);
2665 return DDI_BUF_EMP_400MV_0DB_HSW;
2666 }
2667 }
2668
2669 static uint32_t
2670 intel_bdw_signal_levels(uint8_t train_set)
2671 {
2672 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2673 DP_TRAIN_PRE_EMPHASIS_MASK);
2674 switch (signal_levels) {
2675 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2676 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2677 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2678 return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
2679 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2680 return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
2681
2682 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2683 return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
2684 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2685 return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
2686 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2687 return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
2688
2689 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2690 return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
2691 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2692 return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
2693
2694 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2695 return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
2696
2697 default:
2698 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2699 "0x%x\n", signal_levels);
2700 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2701 }
2702 }
2703
2704 /* Properly updates "DP" with the correct signal levels. */
2705 static void
2706 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2707 {
2708 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2709 enum port port = intel_dig_port->port;
2710 struct drm_device *dev = intel_dig_port->base.base.dev;
2711 uint32_t signal_levels, mask;
2712 uint8_t train_set = intel_dp->train_set[0];
2713
2714 if (IS_BROADWELL(dev)) {
2715 signal_levels = intel_bdw_signal_levels(train_set);
2716 mask = DDI_BUF_EMP_MASK;
2717 } else if (IS_HASWELL(dev)) {
2718 signal_levels = intel_hsw_signal_levels(train_set);
2719 mask = DDI_BUF_EMP_MASK;
2720 } else if (IS_CHERRYVIEW(dev)) {
2721 signal_levels = intel_chv_signal_levels(intel_dp);
2722 mask = 0;
2723 } else if (IS_VALLEYVIEW(dev)) {
2724 signal_levels = intel_vlv_signal_levels(intel_dp);
2725 mask = 0;
2726 } else if (IS_GEN7(dev) && port == PORT_A) {
2727 signal_levels = intel_gen7_edp_signal_levels(train_set);
2728 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
2729 } else if (IS_GEN6(dev) && port == PORT_A) {
2730 signal_levels = intel_gen6_edp_signal_levels(train_set);
2731 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2732 } else {
2733 signal_levels = intel_gen4_signal_levels(train_set);
2734 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2735 }
2736
2737 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2738
2739 *DP = (*DP & ~mask) | signal_levels;
2740 }
2741
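/*
 * Program a training pattern on the source (DP_TP_CTL on DDI platforms,
 * the DP port register otherwise) and mirror it to the sink via
 * DP_TRAINING_PATTERN_SET, including the per-lane drive settings except
 * when training is being disabled.
 */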
2742 static bool
2743 intel_dp_set_link_train(struct intel_dp *intel_dp,
2744 uint32_t *DP,
2745 uint8_t dp_train_pat)
2746 {
2747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2748 struct drm_device *dev = intel_dig_port->base.base.dev;
2749 struct drm_i915_private *dev_priv = dev->dev_private;
2750 enum port port = intel_dig_port->port;
2751 uint8_t buf[sizeof(intel_dp->train_set) + 1];
2752 int ret, len;
2753
2754 if (HAS_DDI(dev)) {
2755 uint32_t temp = I915_READ(DP_TP_CTL(port));
2756
2757 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2758 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2759 else
2760 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2761
2762 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2763 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2764 case DP_TRAINING_PATTERN_DISABLE:
2765 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2767 break;
2768 case DP_TRAINING_PATTERN_1:
2769 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2770 break;
2771 case DP_TRAINING_PATTERN_2:
2772 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2773 break;
2774 case DP_TRAINING_PATTERN_3:
2775 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2776 break;
2777 }
2778 I915_WRITE(DP_TP_CTL(port), temp);
2779
2780 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2781 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2782
2783 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2784 case DP_TRAINING_PATTERN_DISABLE:
2785 *DP |= DP_LINK_TRAIN_OFF_CPT;
2786 break;
2787 case DP_TRAINING_PATTERN_1:
2788 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2789 break;
2790 case DP_TRAINING_PATTERN_2:
2791 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2792 break;
2793 case DP_TRAINING_PATTERN_3:
2794 DRM_ERROR("DP training pattern 3 not supported\n");
2795 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2796 break;
2797 }
2798
2799 } else {
2800 *DP &= ~DP_LINK_TRAIN_MASK;
2801
2802 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2803 case DP_TRAINING_PATTERN_DISABLE:
2804 *DP |= DP_LINK_TRAIN_OFF;
2805 break;
2806 case DP_TRAINING_PATTERN_1:
2807 *DP |= DP_LINK_TRAIN_PAT_1;
2808 break;
2809 case DP_TRAINING_PATTERN_2:
2810 *DP |= DP_LINK_TRAIN_PAT_2;
2811 break;
2812 case DP_TRAINING_PATTERN_3:
2813 DRM_ERROR("DP training pattern 3 not supported\n");
2814 *DP |= DP_LINK_TRAIN_PAT_2;
2815 break;
2816 }
2817 }
2818
2819 I915_WRITE(intel_dp->output_reg, *DP);
2820 POSTING_READ(intel_dp->output_reg);
2821
2822 buf[0] = dp_train_pat;
2823 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
2824 DP_TRAINING_PATTERN_DISABLE) {
2825 /* don't write DP_TRAINING_LANEx_SET on disable */
2826 len = 1;
2827 } else {
2828 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
2829 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
2830 len = intel_dp->lane_count + 1;
2831 }
2832
2833 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
2834 buf, len);
2835
2836 return ret == len;
2837 }
2838
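/*
 * Restart training from a clean slate: zero the cached drive settings
 * and program the given pattern with those defaults.
 */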
2839 static bool
2840 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2841 uint8_t dp_train_pat)
2842 {
2843 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2844 intel_dp_set_signal_levels(intel_dp, DP);
2845 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2846 }
2847
2848 static bool
2849 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2850 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2851 {
2852 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2853 struct drm_device *dev = intel_dig_port->base.base.dev;
2854 struct drm_i915_private *dev_priv = dev->dev_private;
2855 int ret;
2856
2857 intel_get_adjust_train(intel_dp, link_status);
2858 intel_dp_set_signal_levels(intel_dp, DP);
2859
2860 I915_WRITE(intel_dp->output_reg, *DP);
2861 POSTING_READ(intel_dp->output_reg);
2862
2863 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
2864 intel_dp->train_set, intel_dp->lane_count);
2865
2866 return ret == intel_dp->lane_count;
2867 }
2868
2869 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2870 {
2871 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2872 struct drm_device *dev = intel_dig_port->base.base.dev;
2873 struct drm_i915_private *dev_priv = dev->dev_private;
2874 enum port port = intel_dig_port->port;
2875 uint32_t val;
2876
2877 if (!HAS_DDI(dev))
2878 return;
2879
2880 val = I915_READ(DP_TP_CTL(port));
2881 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2882 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
2883 I915_WRITE(DP_TP_CTL(port), val);
2884
2885 /*
2886 * On PORT_A we can have only eDP in SST mode. There the only reason
2887 * we need to set idle transmission mode is to work around a HW issue
2888 * where we enable the pipe while not in idle link-training mode.
2889 * In this case there is a requirement to wait for a minimum number of
2890 * idle patterns to be sent.
2891 */
2892 if (port == PORT_A)
2893 return;
2894
2895 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
2896 1))
2897 DRM_ERROR("Timed out waiting for DP idle patterns\n");
2898 }
2899
2900 /* Enable the corresponding port and start link training pattern 1 */
2901 void
2902 intel_dp_start_link_train(struct intel_dp *intel_dp)
2903 {
2904 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
2905 struct drm_device *dev = encoder->dev;
2906 int i;
2907 uint8_t voltage;
2908 int voltage_tries, loop_tries;
2909 uint32_t DP = intel_dp->DP;
2910 uint8_t link_config[2];
2911
2912 if (HAS_DDI(dev))
2913 intel_ddi_prepare_link_retrain(encoder);
2914
2915 /* Write the link configuration data */
2916 link_config[0] = intel_dp->link_bw;
2917 link_config[1] = intel_dp->lane_count;
2918 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2919 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
2920 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
2921
2922 link_config[0] = 0;
2923 link_config[1] = DP_SET_ANSI_8B10B;
2924 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
2925
2926 DP |= DP_PORT_EN;
2927
2928 /* clock recovery */
2929 if (!intel_dp_reset_link_train(intel_dp, &DP,
2930 DP_TRAINING_PATTERN_1 |
2931 DP_LINK_SCRAMBLING_DISABLE)) {
2932 DRM_ERROR("failed to enable link training\n");
2933 return;
2934 }
2935
2936 voltage = 0xff;
2937 voltage_tries = 0;
2938 loop_tries = 0;
2939 for (;;) {
2940 uint8_t link_status[DP_LINK_STATUS_SIZE];
2941
2942 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
2943 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2944 DRM_ERROR("failed to get link status\n");
2945 break;
2946 }
2947
2948 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2949 DRM_DEBUG_KMS("clock recovery OK\n");
2950 break;
2951 }
2952
2953 /* Check to see if we've tried the max voltage */
2954 for (i = 0; i < intel_dp->lane_count; i++)
2955 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
2956 break;
2957 if (i == intel_dp->lane_count) {
2958 ++loop_tries;
2959 if (loop_tries == 5) {
2960 DRM_ERROR("too many full retries, give up\n");
2961 break;
2962 }
2963 intel_dp_reset_link_train(intel_dp, &DP,
2964 DP_TRAINING_PATTERN_1 |
2965 DP_LINK_SCRAMBLING_DISABLE);
2966 voltage_tries = 0;
2967 continue;
2968 }
2969
2970 /* Check to see if we've tried the same voltage 5 times */
2971 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
2972 ++voltage_tries;
2973 if (voltage_tries == 5) {
2974 DRM_ERROR("too many voltage retries, give up\n");
2975 break;
2976 }
2977 } else {
2978 voltage_tries = 0;
}
2979 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
2980
2981 /* Update training set as requested by target */
2982 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2983 DRM_ERROR("failed to update link training\n");
2984 break;
2985 }
2986 }
2987
2988 intel_dp->DP = DP;
2989 }
2990
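/*
 * Channel equalization phase: loop on pattern 2 (or 3 when the sink
 * supports it), re-running clock recovery if it drops, until the sink
 * reports EQ done or the retry budgets run out.
 */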
2991 void
2992 intel_dp_complete_link_train(struct intel_dp *intel_dp)
2993 {
2994 bool channel_eq = false;
2995 int tries, cr_tries;
2996 uint32_t DP = intel_dp->DP;
2997 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
2998
2999 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3000 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3001 training_pattern = DP_TRAINING_PATTERN_3;
3002
3003 /* channel equalization */
3004 if (!intel_dp_set_link_train(intel_dp, &DP,
3005 training_pattern |
3006 DP_LINK_SCRAMBLING_DISABLE)) {
3007 DRM_ERROR("failed to start channel equalization\n");
3008 return;
3009 }
3010
3011 tries = 0;
3012 cr_tries = 0;
3013 channel_eq = false;
3014 for (;;) {
3015 uint8_t link_status[DP_LINK_STATUS_SIZE];
3016
3017 if (cr_tries > 5) {
3018 DRM_ERROR("failed to train DP, aborting\n");
3019 break;
3020 }
3021
3022 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3023 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3024 DRM_ERROR("failed to get link status\n");
3025 break;
3026 }
3027
3028 /* Make sure clock is still ok */
3029 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3030 intel_dp_start_link_train(intel_dp);
3031 intel_dp_set_link_train(intel_dp, &DP,
3032 training_pattern |
3033 DP_LINK_SCRAMBLING_DISABLE);
3034 cr_tries++;
3035 continue;
3036 }
3037
3038 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3039 channel_eq = true;
3040 break;
3041 }
3042
3043 /* Try 5 times, then try clock recovery if that fails */
3044 if (tries > 5) {
3045 intel_dp_link_down(intel_dp);
3046 intel_dp_start_link_train(intel_dp);
3047 intel_dp_set_link_train(intel_dp, &DP,
3048 training_pattern |
3049 DP_LINK_SCRAMBLING_DISABLE);
3050 tries = 0;
3051 cr_tries++;
3052 continue;
3053 }
3054
3055 /* Update training set as requested by target */
3056 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3057 DRM_ERROR("failed to update link training\n");
3058 break;
3059 }
3060 ++tries;
3061 }
3062
3063 intel_dp_set_idle_link_train(intel_dp);
3064
3065 intel_dp->DP = DP;
3066
3067 if (channel_eq)
3068 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3070 }
3071
3072 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3073 {
3074 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3075 DP_TRAINING_PATTERN_DISABLE);
3076 }
3077
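/*
 * Take the link down on non-DDI platforms: park training in the idle
 * pattern, apply the IBX transcoder-select workaround below, then
 * clear DP_PORT_EN and honour the panel power-down delay.
 */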
3078 static void
3079 intel_dp_link_down(struct intel_dp *intel_dp)
3080 {
3081 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3082 enum port port = intel_dig_port->port;
3083 struct drm_device *dev = intel_dig_port->base.base.dev;
3084 struct drm_i915_private *dev_priv = dev->dev_private;
3085 struct intel_crtc *intel_crtc =
3086 to_intel_crtc(intel_dig_port->base.base.crtc);
3087 uint32_t DP = intel_dp->DP;
3088
3089 if (WARN_ON(HAS_DDI(dev)))
3090 return;
3091
3092 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3093 return;
3094
3095 DRM_DEBUG_KMS("\n");
3096
3097 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3098 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3099 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3100 } else {
3101 DP &= ~DP_LINK_TRAIN_MASK;
3102 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3103 }
3104 POSTING_READ(intel_dp->output_reg);
3105
3106 if (HAS_PCH_IBX(dev) &&
3107 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3108 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
3109
3110 /* Hardware workaround: leaving our transcoder select
3111 * set to transcoder B while it's off will prevent the
3112 * corresponding HDMI output on transcoder A.
3113 *
3114 * Combine this with another hardware workaround:
3115 * transcoder select bit can only be cleared while the
3116 * port is enabled.
3117 */
3118 DP &= ~DP_PIPEB_SELECT;
3119 I915_WRITE(intel_dp->output_reg, DP);
3120
3121 /* Changes to enable or select take place the vblank
3122 * after being written.
3123 */
3124 if (WARN_ON(crtc == NULL)) {
3125 /* We should never try to disable a port without a crtc
3126 * attached. For paranoia keep the code around for a
3127 * bit. */
3128 POSTING_READ(intel_dp->output_reg);
3129 msleep(50);
3130 } else {
3131 intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3132 }
3133
3134 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3135 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3136 POSTING_READ(intel_dp->output_reg);
3137 msleep(intel_dp->panel_power_down_delay);
3138 }
3139
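/*
 * Read and cache the sink's DPCD receiver capabilities and derive the
 * bits we care about: PSR support on eDP, TPS3 support, and the
 * per-port downstream info for branch devices.
 */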
3140 static bool
3141 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3142 {
3143 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3144 struct drm_device *dev = dig_port->base.base.dev;
3145 struct drm_i915_private *dev_priv = dev->dev_private;
3146
3147 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
3148
3149 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3150 sizeof(intel_dp->dpcd)) < 0)
3151 return false; /* aux transfer failed */
3152
3153 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3154 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
3155 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
3156
3157 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3158 return false; /* DPCD not present */
3159
3160 /* Check if the panel supports PSR */
3161 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3162 if (is_edp(intel_dp)) {
3163 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3164 intel_dp->psr_dpcd,
3165 sizeof(intel_dp->psr_dpcd));
3166 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3167 dev_priv->psr.sink_support = true;
3168 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3169 }
3170 }
3171
3172 /* Training Pattern 3 support */
3173 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3174 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3175 intel_dp->use_tps3 = true;
3176 DRM_DEBUG_KMS("Displayport TPS3 supported");
3177 } else
3178 intel_dp->use_tps3 = false;
3179
3180 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3181 DP_DWN_STRM_PORT_PRESENT))
3182 return true; /* native DP sink */
3183
3184 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3185 return true; /* no per-port downstream info */
3186
3187 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3188 intel_dp->downstream_ports,
3189 DP_MAX_DOWNSTREAM_PORTS) < 0)
3190 return false; /* downstream port status fetch failed */
3191
3192 return true;
3193 }
3194
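/*
 * Log the sink and branch IEEE OUIs for debugging, holding VDD so the
 * AUX channel works on eDP.
 */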
3195 static void
3196 intel_dp_probe_oui(struct intel_dp *intel_dp)
3197 {
3198 u8 buf[3];
3199
3200 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3201 return;
3202
3203 intel_edp_panel_vdd_on(intel_dp);
3204
3205 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3206 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3207 buf[0], buf[1], buf[2]);
3208
3209 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3210 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3211 buf[0], buf[1], buf[2]);
3212
3213 edp_panel_vdd_off(intel_dp, false);
3214 }
3215
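/*
 * Ask the sink to start computing test CRCs (after checking
 * DP_TEST_CRC_SUPPORTED), give it two vblanks to produce a stable
 * value, then read the six CRC bytes back and stop the test again.
 */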
3216 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3217 {
3218 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3219 struct drm_device *dev = intel_dig_port->base.base.dev;
3220 struct intel_crtc *intel_crtc =
3221 to_intel_crtc(intel_dig_port->base.base.crtc);
3222 u8 buf[1];
3223
3224 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
3225 return -EAGAIN;
3226
3227 if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
3228 return -ENOTTY;
3229
3230 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3231 DP_TEST_SINK_START) < 0)
3232 return -EAGAIN;
3233
3234 /* Wait 2 vblanks to be sure we will have the correct CRC value */
3235 intel_wait_for_vblank(dev, intel_crtc->pipe);
3236 intel_wait_for_vblank(dev, intel_crtc->pipe);
3237
3238 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3239 return -EAGAIN;
3240
3241 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
3242 return 0;
3243 }
3244
3245 static bool
3246 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3247 {
3248 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3249 DP_DEVICE_SERVICE_IRQ_VECTOR,
3250 sink_irq_vector, 1) == 1;
3251 }
3252
3253 static void
3254 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3255 {
3256 /* NAK by default */
3257 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3258 }
3259
3260 /*
3261 * According to DP spec
3262 * 5.1.2:
3263 * 1. Read DPCD
3264 * 2. Configure link according to Receiver Capabilities
3265 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3266 * 4. Check link status on receipt of hot-plug interrupt
3267 */
3268
3269 void
3270 intel_dp_check_link_status(struct intel_dp *intel_dp)
3271 {
3272 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3273 u8 sink_irq_vector;
3274 u8 link_status[DP_LINK_STATUS_SIZE];
3275
3276 /* FIXME: This access isn't protected by any locks. */
3277 if (!intel_encoder->connectors_active)
3278 return;
3279
3280 if (WARN_ON(!intel_encoder->base.crtc))
3281 return;
3282
3283 /* Try to read receiver status if the link appears to be up */
3284 if (!intel_dp_get_link_status(intel_dp, link_status))
3285 return;
3287
3288 /* Now read the DPCD to see if it's actually running */
3289 if (!intel_dp_get_dpcd(intel_dp))
3290 return;
3292
3293 /* Try to read the source of the interrupt */
3294 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3295 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3296 /* Clear interrupt source */
3297 drm_dp_dpcd_writeb(&intel_dp->aux,
3298 DP_DEVICE_SERVICE_IRQ_VECTOR,
3299 sink_irq_vector);
3300
3301 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3302 intel_dp_handle_test_request(intel_dp);
3303 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3304 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3305 }
3306
3307 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3308 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3309 intel_encoder->base.name);
3310 intel_dp_start_link_train(intel_dp);
3311 intel_dp_complete_link_train(intel_dp);
3312 intel_dp_stop_link_train(intel_dp);
3313 }
3314 }
3315
3316 /* XXX this is probably wrong for multiple downstream ports */
3317 static enum drm_connector_status
3318 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3319 {
3320 uint8_t *dpcd = intel_dp->dpcd;
3321 uint8_t type;
3322
3323 if (!intel_dp_get_dpcd(intel_dp))
3324 return connector_status_disconnected;
3325
3326 /* if there's no downstream port, we're done */
3327 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3328 return connector_status_connected;
3329
3330 /* If we're HPD-aware, SINK_COUNT changes dynamically */
3331 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3332 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3333 uint8_t reg;
3334
3335 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
3336 &reg, 1) < 0)
3337 return connector_status_unknown;
3338
3339 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
3340 : connector_status_disconnected;
3341 }
3342
3343 /* If no HPD, poke DDC gently */
3344 if (drm_probe_ddc(&intel_dp->aux.ddc))
3345 return connector_status_connected;
3346
3347 /* Well we tried, say unknown for unreliable port types */
3348 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3349 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3350 if (type == DP_DS_PORT_TYPE_VGA ||
3351 type == DP_DS_PORT_TYPE_NON_EDID)
3352 return connector_status_unknown;
3353 } else {
3354 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3355 DP_DWN_STRM_PORT_TYPE_MASK;
3356 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3357 type == DP_DWN_STRM_PORT_TYPE_OTHER)
3358 return connector_status_unknown;
3359 }
3360
3361 /* Anything else is out of spec, warn and ignore */
3362 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3363 return connector_status_disconnected;
3364 }
3365
3366 static enum drm_connector_status
3367 ironlake_dp_detect(struct intel_dp *intel_dp)
3368 {
3369 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3370 struct drm_i915_private *dev_priv = dev->dev_private;
3371 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3372 enum drm_connector_status status;
3373
3374 /* Can't disconnect eDP, but you can close the lid... */
3375 if (is_edp(intel_dp)) {
3376 status = intel_panel_detect(dev);
3377 if (status == connector_status_unknown)
3378 status = connector_status_connected;
3379 return status;
3380 }
3381
3382 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3383 return connector_status_disconnected;
3384
3385 return intel_dp_detect_dpcd(intel_dp);
3386 }
3387
3388 static enum drm_connector_status
3389 g4x_dp_detect(struct intel_dp *intel_dp)
3390 {
3391 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3392 struct drm_i915_private *dev_priv = dev->dev_private;
3393 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3394 uint32_t bit;
3395
3396 /* Can't disconnect eDP, but you can close the lid... */
3397 if (is_edp(intel_dp)) {
3398 enum drm_connector_status status;
3399
3400 status = intel_panel_detect(dev);
3401 if (status == connector_status_unknown)
3402 status = connector_status_connected;
3403 return status;
3404 }
3405
3406 if (IS_VALLEYVIEW(dev)) {
3407 switch (intel_dig_port->port) {
3408 case PORT_B:
3409 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3410 break;
3411 case PORT_C:
3412 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3413 break;
3414 case PORT_D:
3415 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3416 break;
3417 default:
3418 return connector_status_unknown;
3419 }
3420 } else {
3421 switch (intel_dig_port->port) {
3422 case PORT_B:
3423 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3424 break;
3425 case PORT_C:
3426 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3427 break;
3428 case PORT_D:
3429 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3430 break;
3431 default:
3432 return connector_status_unknown;
3433 }
3434 }
3435
3436 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3437 return connector_status_disconnected;
3438
3439 return intel_dp_detect_dpcd(intel_dp);
3440 }
3441
3442 static struct edid *
3443 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3444 {
3445 struct intel_connector *intel_connector = to_intel_connector(connector);
3446
3447 /* use cached edid if we have one */
3448 if (intel_connector->edid) {
3449 /* invalid edid */
3450 if (IS_ERR(intel_connector->edid))
3451 return NULL;
3452
3453 return drm_edid_duplicate(intel_connector->edid);
3454 }
3455
3456 return drm_get_edid(connector, adapter);
3457 }
3458
3459 static int
3460 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3461 {
3462 struct intel_connector *intel_connector = to_intel_connector(connector);
3463
3464 /* use cached edid if we have one */
3465 if (intel_connector->edid) {
3466 /* invalid edid */
3467 if (IS_ERR(intel_connector->edid))
3468 return 0;
3469
3470 return intel_connector_update_modes(connector,
3471 intel_connector->edid);
3472 }
3473
3474 return intel_ddc_get_modes(connector, adapter);
3475 }
3476
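/*
 * Full detect: hold runtime PM and the port's power domain while
 * probing via HPD live status and/or DPCD, then refresh the audio
 * state from a freshly fetched EDID unless the force_audio property
 * overrides it.
 */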
3477 static enum drm_connector_status
3478 intel_dp_detect(struct drm_connector *connector, bool force)
3479 {
3480 struct intel_dp *intel_dp = intel_attached_dp(connector);
3481 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3482 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3483 struct drm_device *dev = connector->dev;
3484 struct drm_i915_private *dev_priv = dev->dev_private;
3485 enum drm_connector_status status;
3486 enum intel_display_power_domain power_domain;
3487 struct edid *edid = NULL;
3488
3489 intel_runtime_pm_get(dev_priv);
3490
3491 power_domain = intel_display_port_power_domain(intel_encoder);
3492 intel_display_power_get(dev_priv, power_domain);
3493
3494 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3495 connector->base.id, connector->name);
3496
3497 intel_dp->has_audio = false;
3498
3499 if (HAS_PCH_SPLIT(dev))
3500 status = ironlake_dp_detect(intel_dp);
3501 else
3502 status = g4x_dp_detect(intel_dp);
3503
3504 if (status != connector_status_connected)
3505 goto out;
3506
3507 intel_dp_probe_oui(intel_dp);
3508
3509 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
3510 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3511 } else {
3512 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3513 if (edid) {
3514 intel_dp->has_audio = drm_detect_monitor_audio(edid);
3515 kfree(edid);
3516 }
3517 }
3518
3519 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3520 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3521 status = connector_status_connected;
3522
3523 out:
3524 intel_display_power_put(dev_priv, power_domain);
3525
3526 intel_runtime_pm_put(dev_priv);
3527
3528 return status;
3529 }
3530
3531 static int intel_dp_get_modes(struct drm_connector *connector)
3532 {
3533 struct intel_dp *intel_dp = intel_attached_dp(connector);
3534 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3535 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3536 struct intel_connector *intel_connector = to_intel_connector(connector);
3537 struct drm_device *dev = connector->dev;
3538 struct drm_i915_private *dev_priv = dev->dev_private;
3539 enum intel_display_power_domain power_domain;
3540 int ret;
3541
3542 /* We should parse the EDID data and find out if it has an audio sink */
3544
3545 power_domain = intel_display_port_power_domain(intel_encoder);
3546 intel_display_power_get(dev_priv, power_domain);
3547
3548 ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
3549 intel_display_power_put(dev_priv, power_domain);
3550 if (ret)
3551 return ret;
3552
3553 /* if eDP has no EDID, fall back to fixed mode */
3554 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3555 struct drm_display_mode *mode;
3556 mode = drm_mode_duplicate(dev,
3557 intel_connector->panel.fixed_mode);
3558 if (mode) {
3559 drm_mode_probed_add(connector, mode);
3560 return 1;
3561 }
3562 }
3563 return 0;
3564 }
3565
3566 static bool
3567 intel_dp_detect_audio(struct drm_connector *connector)
3568 {
3569 struct intel_dp *intel_dp = intel_attached_dp(connector);
3570 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3571 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3572 struct drm_device *dev = connector->dev;
3573 struct drm_i915_private *dev_priv = dev->dev_private;
3574 enum intel_display_power_domain power_domain;
3575 struct edid *edid;
3576 bool has_audio = false;
3577
3578 power_domain = intel_display_port_power_domain(intel_encoder);
3579 intel_display_power_get(dev_priv, power_domain);
3580
3581 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3582 if (edid) {
3583 has_audio = drm_detect_monitor_audio(edid);
3584 kfree(edid);
3585 }
3586
3587 intel_display_power_put(dev_priv, power_domain);
3588
3589 return has_audio;
3590 }
3591
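/*
 * Connector property changes take effect immediately: if the encoder is
 * on an active CRTC, a modeset restore is triggered at the end so the
 * new audio, color range or panel-fitter setting is applied.
 */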
3592 static int
3593 intel_dp_set_property(struct drm_connector *connector,
3594 struct drm_property *property,
3595 uint64_t val)
3596 {
3597 struct drm_i915_private *dev_priv = connector->dev->dev_private;
3598 struct intel_connector *intel_connector = to_intel_connector(connector);
3599 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
3600 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3601 int ret;
3602
3603 ret = drm_object_property_set_value(&connector->base, property, val);
3604 if (ret)
3605 return ret;
3606
3607 if (property == dev_priv->force_audio_property) {
3608 int i = val;
3609 bool has_audio;
3610
3611 if (i == intel_dp->force_audio)
3612 return 0;
3613
3614 intel_dp->force_audio = i;
3615
3616 if (i == HDMI_AUDIO_AUTO)
3617 has_audio = intel_dp_detect_audio(connector);
3618 else
3619 has_audio = (i == HDMI_AUDIO_ON);
3620
3621 if (has_audio == intel_dp->has_audio)
3622 return 0;
3623
3624 intel_dp->has_audio = has_audio;
3625 goto done;
3626 }
3627
3628 if (property == dev_priv->broadcast_rgb_property) {
3629 bool old_auto = intel_dp->color_range_auto;
3630 uint32_t old_range = intel_dp->color_range;
3631
3632 switch (val) {
3633 case INTEL_BROADCAST_RGB_AUTO:
3634 intel_dp->color_range_auto = true;
3635 break;
3636 case INTEL_BROADCAST_RGB_FULL:
3637 intel_dp->color_range_auto = false;
3638 intel_dp->color_range = 0;
3639 break;
3640 case INTEL_BROADCAST_RGB_LIMITED:
3641 intel_dp->color_range_auto = false;
3642 intel_dp->color_range = DP_COLOR_RANGE_16_235;
3643 break;
3644 default:
3645 return -EINVAL;
3646 }
3647
3648 if (old_auto == intel_dp->color_range_auto &&
3649 old_range == intel_dp->color_range)
3650 return 0;
3651
3652 goto done;
3653 }
3654
3655 if (is_edp(intel_dp) &&
3656 property == connector->dev->mode_config.scaling_mode_property) {
3657 if (val == DRM_MODE_SCALE_NONE) {
3658 			DRM_DEBUG_KMS("scaling mode NONE not supported on eDP\n");
3659 return -EINVAL;
3660 }
3661
3662 if (intel_connector->panel.fitting_mode == val) {
3663 /* the eDP scaling property is not changed */
3664 return 0;
3665 }
3666 intel_connector->panel.fitting_mode = val;
3667
3668 goto done;
3669 }
3670
3671 return -EINVAL;
3672
3673 done:
3674 if (intel_encoder->base.crtc)
3675 intel_crtc_restore_mode(intel_encoder->base.crtc);
3676
3677 return 0;
3678 }
3679
3680 static void
3681 intel_dp_connector_destroy(struct drm_connector *connector)
3682 {
3683 struct intel_connector *intel_connector = to_intel_connector(connector);
3684
3685 if (!IS_ERR_OR_NULL(intel_connector->edid))
3686 kfree(intel_connector->edid);
3687
3688 /* Can't call is_edp() since the encoder may have been destroyed
3689 * already. */
3690 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3691 intel_panel_fini(&intel_connector->panel);
3692
3693 drm_connector_cleanup(connector);
3694 kfree(connector);
3695 }
3696
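/*
 * Encoder teardown: unregister the AUX channel first, and for eDP make
 * sure any pending VDD-off work has completed so we don't leave the
 * panel power sequencer with a stale VDD reference.
 */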
3697 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3698 {
3699 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
3700 struct intel_dp *intel_dp = &intel_dig_port->dp;
3701 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3702
3703 drm_dp_aux_unregister(&intel_dp->aux);
3704 drm_encoder_cleanup(encoder);
3705 if (is_edp(intel_dp)) {
3706 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3707 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
3708 edp_panel_vdd_off_sync(intel_dp);
3709 drm_modeset_unlock(&dev->mode_config.connection_mutex);
3710 }
3711 kfree(intel_dig_port);
3712 }
3713
3714 static const struct drm_connector_funcs intel_dp_connector_funcs = {
3715 .dpms = intel_connector_dpms,
3716 .detect = intel_dp_detect,
3717 .fill_modes = drm_helper_probe_single_connector_modes,
3718 .set_property = intel_dp_set_property,
3719 .destroy = intel_dp_connector_destroy,
3720 };
3721
3722 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
3723 .get_modes = intel_dp_get_modes,
3724 .mode_valid = intel_dp_mode_valid,
3725 .best_encoder = intel_best_encoder,
3726 };
3727
3728 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
3729 .destroy = intel_dp_encoder_destroy,
3730 };
3731
3732 static void
3733 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
3734 {
3735 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3736
3737 intel_dp_check_link_status(intel_dp);
3738 }
3739
3740 /* Return which DP Port should be selected for Transcoder DP control */
3741 int
3742 intel_trans_dp_port_sel(struct drm_crtc *crtc)
3743 {
3744 struct drm_device *dev = crtc->dev;
3745 struct intel_encoder *intel_encoder;
3746 struct intel_dp *intel_dp;
3747
3748 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3749 intel_dp = enc_to_intel_dp(&intel_encoder->base);
3750
3751 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3752 intel_encoder->type == INTEL_OUTPUT_EDP)
3753 return intel_dp->output_reg;
3754 }
3755
3756 return -1;
3757 }
3758
3759 /* check the VBT to see whether the eDP is on one of the DP ports */
3760 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3761 {
3762 struct drm_i915_private *dev_priv = dev->dev_private;
3763 union child_device_config *p_child;
3764 int i;
3765 static const short port_mapping[] = {
3766 [PORT_B] = PORT_IDPB,
3767 [PORT_C] = PORT_IDPC,
3768 [PORT_D] = PORT_IDPD,
3769 };
3770
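	/* DP-A is the CPU eDP port and can only ever drive an eDP panel. */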
3771 if (port == PORT_A)
3772 return true;
3773
3774 if (!dev_priv->vbt.child_dev_num)
3775 return false;
3776
3777 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3778 p_child = dev_priv->vbt.child_dev + i;
3779
3780 if (p_child->common.dvo_port == port_mapping[port] &&
3781 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3782 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3783 return true;
3784 }
3785 return false;
3786 }
3787
3788 static void
3789 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3790 {
3791 struct intel_connector *intel_connector = to_intel_connector(connector);
3792
3793 intel_attach_force_audio_property(connector);
3794 intel_attach_broadcast_rgb_property(connector);
3795 intel_dp->color_range_auto = true;
3796
3797 if (is_edp(intel_dp)) {
3798 drm_mode_create_scaling_mode_property(connector->dev);
3799 drm_object_attach_property(
3800 &connector->base,
3801 connector->dev->mode_config.scaling_mode_property,
3802 DRM_MODE_SCALE_ASPECT);
3803 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
3804 }
3805 }
3806
3807 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
3808 {
3809 intel_dp->last_power_cycle = jiffies;
3810 intel_dp->last_power_on = jiffies;
3811 intel_dp->last_backlight_off = jiffies;
3812 }
3813
3814 static void
3815 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
3816 struct intel_dp *intel_dp,
3817 struct edp_power_seq *out)
3818 {
3819 struct drm_i915_private *dev_priv = dev->dev_private;
3820 struct edp_power_seq cur, vbt, spec, final;
3821 u32 pp_on, pp_off, pp_div, pp;
3822 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
3823
3824 if (HAS_PCH_SPLIT(dev)) {
3825 pp_ctrl_reg = PCH_PP_CONTROL;
3826 pp_on_reg = PCH_PP_ON_DELAYS;
3827 pp_off_reg = PCH_PP_OFF_DELAYS;
3828 pp_div_reg = PCH_PP_DIVISOR;
3829 } else {
3830 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3831
3832 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
3833 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3834 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3835 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3836 }
3837
3838 /* Workaround: Need to write PP_CONTROL with the unlock key as
3839 * the very first thing. */
3840 pp = ironlake_get_pp_control(intel_dp);
3841 I915_WRITE(pp_ctrl_reg, pp);
3842
3843 pp_on = I915_READ(pp_on_reg);
3844 pp_off = I915_READ(pp_off_reg);
3845 pp_div = I915_READ(pp_div_reg);
3846
3847 /* Pull timing values out of registers */
3848 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
3849 PANEL_POWER_UP_DELAY_SHIFT;
3850
3851 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
3852 PANEL_LIGHT_ON_DELAY_SHIFT;
3853
3854 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
3855 PANEL_LIGHT_OFF_DELAY_SHIFT;
3856
3857 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
3858 PANEL_POWER_DOWN_DELAY_SHIFT;
3859
3860 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
3861 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
3862
3863 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3864 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
3865
3866 vbt = dev_priv->vbt.edp_pps;
3867
3868 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
3869 * our hw here, which are all in 100usec. */
3870 spec.t1_t3 = 210 * 10;
3871 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
3872 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
3873 spec.t10 = 500 * 10;
3874 	/* This one is special and actually in units of 100ms, but
3875 	 * zero-based in the hw (so we need to add 100 ms). But the sw vbt
3876 	 * table multiplies it by 1000 to make it in units of 100usec,
3877 	 * too. */
3878 spec.t11_t12 = (510 + 100) * 10;
3879
3880 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3881 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
3882
3883 /* Use the max of the register settings and vbt. If both are
3884 * unset, fall back to the spec limits. */
3885 #define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
3886 spec.field : \
3887 max(cur.field, vbt.field))
3888 assign_final(t1_t3);
3889 assign_final(t8);
3890 assign_final(t9);
3891 assign_final(t10);
3892 assign_final(t11_t12);
3893 #undef assign_final
3894
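	/*
	 * All of the delays above are in 100us units; DIV_ROUND_UP by 10
	 * converts them to ms, rounding up so we never wait less than the
	 * panel requires (e.g. a t1_t3 of 355, i.e. 35.5ms, becomes a 36ms
	 * panel_power_up_delay).
	 */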
3895 #define get_delay(field) (DIV_ROUND_UP(final.field, 10))
3896 intel_dp->panel_power_up_delay = get_delay(t1_t3);
3897 intel_dp->backlight_on_delay = get_delay(t8);
3898 intel_dp->backlight_off_delay = get_delay(t9);
3899 intel_dp->panel_power_down_delay = get_delay(t10);
3900 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
3901 #undef get_delay
3902
3903 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
3904 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
3905 intel_dp->panel_power_cycle_delay);
3906
3907 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
3908 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
3909
3910 if (out)
3911 *out = final;
3912 }
3913
3914 static void
3915 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3916 struct intel_dp *intel_dp,
3917 struct edp_power_seq *seq)
3918 {
3919 struct drm_i915_private *dev_priv = dev->dev_private;
3920 u32 pp_on, pp_off, pp_div, port_sel = 0;
3921 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
3922 int pp_on_reg, pp_off_reg, pp_div_reg;
3923
3924 if (HAS_PCH_SPLIT(dev)) {
3925 pp_on_reg = PCH_PP_ON_DELAYS;
3926 pp_off_reg = PCH_PP_OFF_DELAYS;
3927 pp_div_reg = PCH_PP_DIVISOR;
3928 } else {
3929 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3930
3931 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3932 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3933 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3934 }
3935
3936 /*
3937 * And finally store the new values in the power sequencer. The
3938 * backlight delays are set to 1 because we do manual waits on them. For
3939 * T8, even BSpec recommends doing it. For T9, if we don't do this,
3940 * we'll end up waiting for the backlight off delay twice: once when we
3941 * do the manual sleep, and once when we disable the panel and wait for
3942 * the PP_STATUS bit to become zero.
3943 */
3944 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
3945 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
3946 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
3947 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
3948 	/* Compute the divisor for the pp clock, simply matching the Bspec
3949 	 * formula. */
3950 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
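	/* seq->t11_t12 was normalized to 100us units above; the register
	 * wants 100ms units, hence the DIV_ROUND_UP by 1000 below. */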
3951 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
3952 << PANEL_POWER_CYCLE_DELAY_SHIFT);
3953
3954 /* Haswell doesn't have any port selection bits for the panel
3955 * power sequencer any more. */
3956 if (IS_VALLEYVIEW(dev)) {
3957 if (dp_to_dig_port(intel_dp)->port == PORT_B)
3958 port_sel = PANEL_PORT_SELECT_DPB_VLV;
3959 else
3960 port_sel = PANEL_PORT_SELECT_DPC_VLV;
3961 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
3962 if (dp_to_dig_port(intel_dp)->port == PORT_A)
3963 port_sel = PANEL_PORT_SELECT_DPA;
3964 else
3965 port_sel = PANEL_PORT_SELECT_DPD;
3966 }
3967
3968 pp_on |= port_sel;
3969
3970 I915_WRITE(pp_on_reg, pp_on);
3971 I915_WRITE(pp_off_reg, pp_off);
3972 I915_WRITE(pp_div_reg, pp_div);
3973
3974 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
3975 I915_READ(pp_on_reg),
3976 I915_READ(pp_off_reg),
3977 I915_READ(pp_div_reg));
3978 }
3979
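/**
 * intel_dp_set_drrs_state - program a new DRRS refresh rate
 * @dev: drm device
 * @refresh_rate: target refresh rate, in Hz
 *
 * Validates that seamless DRRS can be used (an eDP connector has been
 * registered, PSR is off on pre-gen8, the CRTC is active) and, on gen7,
 * switches between the high and low refresh rates by toggling
 * PIPECONF_EDP_RR_MODE_SWITCH.
 */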
3980 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
3981 {
3982 struct drm_i915_private *dev_priv = dev->dev_private;
3983 struct intel_encoder *encoder;
3984 struct intel_dp *intel_dp = NULL;
3985 struct intel_crtc_config *config = NULL;
3986 struct intel_crtc *intel_crtc = NULL;
3987 struct intel_connector *intel_connector = dev_priv->drrs.connector;
3988 u32 reg, val;
3989 enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
3990
3991 if (refresh_rate <= 0) {
3992 		DRM_DEBUG_KMS("Refresh rate should be a positive value.\n");
3993 return;
3994 }
3995
3996 if (intel_connector == NULL) {
3997 DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
3998 return;
3999 }
4000
4001 if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
4002 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4003 return;
4004 }
4005
4006 encoder = intel_attached_encoder(&intel_connector->base);
4007 intel_dp = enc_to_intel_dp(&encoder->base);
4008 intel_crtc = encoder->new_crtc;
4009
4010 if (!intel_crtc) {
4011 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4012 return;
4013 }
4014
4015 config = &intel_crtc->config;
4016
4017 if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
4018 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4019 return;
4020 }
4021
4022 if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
4023 index = DRRS_LOW_RR;
4024
4025 if (index == intel_dp->drrs_state.refresh_rate_type) {
4026 DRM_DEBUG_KMS(
4027 "DRRS requested for previously set RR...ignoring\n");
4028 return;
4029 }
4030
4031 if (!intel_crtc->active) {
4032 		DRM_DEBUG_KMS("eDP encoder disabled, CRTC not active\n");
4033 return;
4034 }
4035
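	/*
	 * On gen7 the rate switch is a PIPECONF bit flip; M2/N2 for the
	 * downclocked link are programmed before the PIPECONF write so the
	 * new timings are in place when the switch takes effect.
	 */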
4036 if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
4037 reg = PIPECONF(intel_crtc->config.cpu_transcoder);
4038 val = I915_READ(reg);
4039 if (index > DRRS_HIGH_RR) {
4040 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4041 intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
4042 } else {
4043 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4044 }
4045 I915_WRITE(reg, val);
4046 }
4047
4048 	/*
4049 	 * mutex taken to ensure that there is no race between different
4050 	 * DRRS calls trying to update the refresh rate. This scenario may
4051 	 * occur in the future, when idleness-detection-based DRRS in the
4052 	 * kernel and calls from user space to set a different RR coexist.
4053 	 */
4054
4055 mutex_lock(&intel_dp->drrs_state.mutex);
4056
4057 intel_dp->drrs_state.refresh_rate_type = index;
4058
4059 mutex_unlock(&intel_dp->drrs_state.mutex);
4060
4061 	DRM_DEBUG_KMS("eDP refresh rate set to %dHz\n", refresh_rate);
4062 }
4063
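/*
 * Probe for seamless DRRS: requires gen7+, a VBT that reports
 * SEAMLESS_DRRS_SUPPORT and a panel mode with a lower refresh rate than
 * the fixed mode. Returns the downclocked mode, or NULL if DRRS cannot
 * be used.
 */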
4064 static struct drm_display_mode *
4065 intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4066 struct intel_connector *intel_connector,
4067 struct drm_display_mode *fixed_mode)
4068 {
4069 struct drm_connector *connector = &intel_connector->base;
4070 struct intel_dp *intel_dp = &intel_dig_port->dp;
4071 struct drm_device *dev = intel_dig_port->base.base.dev;
4072 struct drm_i915_private *dev_priv = dev->dev_private;
4073 struct drm_display_mode *downclock_mode = NULL;
4074
4075 if (INTEL_INFO(dev)->gen <= 6) {
4076 		DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
4077 return NULL;
4078 }
4079
4080 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4081 DRM_INFO("VBT doesn't support DRRS\n");
4082 return NULL;
4083 }
4084
4085 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
4086 						    connector);
4087
4088 if (!downclock_mode) {
4089 		DRM_INFO("Downclock mode not found; DRRS not supported\n");
4090 return NULL;
4091 }
4092
4093 dev_priv->drrs.connector = intel_connector;
4094
4095 mutex_init(&intel_dp->drrs_state.mutex);
4096
4097 intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4098
4099 intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4100 DRM_INFO("seamless DRRS supported for eDP panel.\n");
4101 return downclock_mode;
4102 }
4103
4104 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4105 struct intel_connector *intel_connector,
4106 struct edp_power_seq *power_seq)
4107 {
4108 struct drm_connector *connector = &intel_connector->base;
4109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4110 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4111 struct drm_device *dev = intel_encoder->base.dev;
4112 struct drm_i915_private *dev_priv = dev->dev_private;
4113 struct drm_display_mode *fixed_mode = NULL;
4114 struct drm_display_mode *downclock_mode = NULL;
4115 bool has_dpcd;
4116 struct drm_display_mode *scan;
4117 struct edid *edid;
4118
4119 intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
4120
4121 if (!is_edp(intel_dp))
4122 return true;
4123
4124 /* The VDD bit needs a power domain reference, so if the bit is already
4125 * enabled when we boot, grab this reference. */
4126 if (edp_have_panel_vdd(intel_dp)) {
4127 enum intel_display_power_domain power_domain;
4128 power_domain = intel_display_port_power_domain(intel_encoder);
4129 intel_display_power_get(dev_priv, power_domain);
4130 }
4131
4132 	/* Cache DPCD and EDID for eDP. */
4133 intel_edp_panel_vdd_on(intel_dp);
4134 has_dpcd = intel_dp_get_dpcd(intel_dp);
4135 edp_panel_vdd_off(intel_dp, false);
4136
4137 if (has_dpcd) {
4138 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4139 dev_priv->no_aux_handshake =
4140 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4141 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4142 } else {
4143 /* if this fails, presume the device is a ghost */
4144 DRM_INFO("failed to retrieve link info, disabling eDP\n");
4145 return false;
4146 }
4147
4148 /* We now know it's not a ghost, init power sequence regs. */
4149 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
4150
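	/* Read the EDID once under the mode_config lock and cache either
	 * the result or the failure, so later probes stay off the DDC bus. */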
4151 mutex_lock(&dev->mode_config.mutex);
4152 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
4153 if (edid) {
4154 if (drm_add_edid_modes(connector, edid)) {
4155 drm_mode_connector_update_edid_property(connector,
4156 edid);
4157 drm_edid_to_eld(connector, edid);
4158 } else {
4159 kfree(edid);
4160 edid = ERR_PTR(-EINVAL);
4161 }
4162 } else {
4163 edid = ERR_PTR(-ENOENT);
4164 }
4165 intel_connector->edid = edid;
4166
4167 /* prefer fixed mode from EDID if available */
4168 list_for_each_entry(scan, &connector->probed_modes, head) {
4169 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
4170 fixed_mode = drm_mode_duplicate(dev, scan);
4171 downclock_mode = intel_dp_drrs_init(
4172 intel_dig_port,
4173 intel_connector, fixed_mode);
4174 break;
4175 }
4176 }
4177
4178 /* fallback to VBT if available for eDP */
4179 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
4180 fixed_mode = drm_mode_duplicate(dev,
4181 dev_priv->vbt.lfp_lvds_vbt_mode);
4182 if (fixed_mode)
4183 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
4184 }
4185 mutex_unlock(&dev->mode_config.mutex);
4186
4187 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
4188 intel_panel_setup_backlight(connector);
4189
4190 return true;
4191 }
4192
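/*
 * Bring up a DP/eDP connector on the given digital port: pick the
 * platform's AUX clock divider and send-ctl hooks, register the
 * connector, wire up the hotplug pin and, for eDP, initialize the panel
 * power sequencer. Returns false (after tearing everything down again)
 * if an eDP panel turns out to be a ghost.
 */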
4193 bool
4194 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4195 struct intel_connector *intel_connector)
4196 {
4197 struct drm_connector *connector = &intel_connector->base;
4198 struct intel_dp *intel_dp = &intel_dig_port->dp;
4199 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4200 struct drm_device *dev = intel_encoder->base.dev;
4201 struct drm_i915_private *dev_priv = dev->dev_private;
4202 enum port port = intel_dig_port->port;
4203 struct edp_power_seq power_seq = { 0 };
4204 int type;
4205
4206 /* intel_dp vfuncs */
4207 if (IS_VALLEYVIEW(dev))
4208 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
4209 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4210 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
4211 else if (HAS_PCH_SPLIT(dev))
4212 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
4213 else
4214 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
4215
4216 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
4217
4218 /* Preserve the current hw state. */
4219 intel_dp->DP = I915_READ(intel_dp->output_reg);
4220 intel_dp->attached_connector = intel_connector;
4221
4222 if (intel_dp_is_edp(dev, port))
4223 type = DRM_MODE_CONNECTOR_eDP;
4224 else
4225 type = DRM_MODE_CONNECTOR_DisplayPort;
4226
4227 /*
4228 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
4229 * for DP the encoder type can be set by the caller to
4230 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
4231 */
4232 if (type == DRM_MODE_CONNECTOR_eDP)
4233 intel_encoder->type = INTEL_OUTPUT_EDP;
4234
4235 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
4236 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
4237 port_name(port));
4238
4239 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
4240 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
4241
4242 connector->interlace_allowed = true;
4243 	connector->doublescan_allowed = false;
4244
4245 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4246 edp_panel_vdd_work);
4247
4248 intel_connector_attach_encoder(intel_connector, intel_encoder);
4249 drm_connector_register(connector);
4250
4251 if (HAS_DDI(dev))
4252 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
4253 else
4254 intel_connector->get_hw_state = intel_connector_get_hw_state;
4255 intel_connector->unregister = intel_dp_connector_unregister;
4256
4257 /* Set up the hotplug pin. */
4258 switch (port) {
4259 case PORT_A:
4260 intel_encoder->hpd_pin = HPD_PORT_A;
4261 break;
4262 case PORT_B:
4263 intel_encoder->hpd_pin = HPD_PORT_B;
4264 break;
4265 case PORT_C:
4266 intel_encoder->hpd_pin = HPD_PORT_C;
4267 break;
4268 case PORT_D:
4269 intel_encoder->hpd_pin = HPD_PORT_D;
4270 break;
4271 default:
4272 BUG();
4273 }
4274
4275 if (is_edp(intel_dp)) {
4276 intel_dp_init_panel_power_timestamps(intel_dp);
4277 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
4278 }
4279
4280 intel_dp_aux_init(intel_dp, intel_connector);
4281
4282 intel_dp->psr_setup_done = false;
4283
4284 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
4285 drm_dp_aux_unregister(&intel_dp->aux);
4286 if (is_edp(intel_dp)) {
4287 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4288 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4289 edp_panel_vdd_off_sync(intel_dp);
4290 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4291 }
4292 drm_connector_unregister(connector);
4293 drm_connector_cleanup(connector);
4294 return false;
4295 }
4296
4297 intel_dp_add_properties(intel_dp, connector);
4298
4299 	/* For G4X desktop chips, PEG_BAND_GAP_DATA bits 3:0 must first be
4300 	 * written with 0xd. Failure to do so will result in spurious
4301 	 * interrupts being generated on the port when a cable is not attached.
4302 	 */
4303 if (IS_G4X(dev) && !IS_GM45(dev)) {
4304 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
4305 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
4306 }
4307
4308 return true;
4309 }
4310
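/*
 * Allocate and initialize a DP digital port encoder. The
 * platform-specific enable/disable hooks are chosen here (CHV, VLV or
 * g4x+); intel_dp_init_connector() does the rest, and on failure the
 * encoder and both allocations are freed again.
 */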
4311 void
4312 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4313 {
4314 struct intel_digital_port *intel_dig_port;
4315 struct intel_encoder *intel_encoder;
4316 struct drm_encoder *encoder;
4317 struct intel_connector *intel_connector;
4318
4319 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
4320 if (!intel_dig_port)
4321 return;
4322
4323 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
4324 if (!intel_connector) {
4325 kfree(intel_dig_port);
4326 return;
4327 }
4328
4329 intel_encoder = &intel_dig_port->base;
4330 encoder = &intel_encoder->base;
4331
4332 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
4333 DRM_MODE_ENCODER_TMDS);
4334
4335 intel_encoder->compute_config = intel_dp_compute_config;
4336 intel_encoder->disable = intel_disable_dp;
4337 intel_encoder->get_hw_state = intel_dp_get_hw_state;
4338 intel_encoder->get_config = intel_dp_get_config;
4339 if (IS_CHERRYVIEW(dev)) {
4340 intel_encoder->pre_enable = chv_pre_enable_dp;
4341 intel_encoder->enable = vlv_enable_dp;
4342 intel_encoder->post_disable = chv_post_disable_dp;
4343 } else if (IS_VALLEYVIEW(dev)) {
4344 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
4345 intel_encoder->pre_enable = vlv_pre_enable_dp;
4346 intel_encoder->enable = vlv_enable_dp;
4347 intel_encoder->post_disable = vlv_post_disable_dp;
4348 } else {
4349 intel_encoder->pre_enable = g4x_pre_enable_dp;
4350 intel_encoder->enable = g4x_enable_dp;
4351 intel_encoder->post_disable = g4x_post_disable_dp;
4352 }
4353
4354 intel_dig_port->port = port;
4355 intel_dig_port->dp.output_reg = output_reg;
4356
4357 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4358 if (IS_CHERRYVIEW(dev)) {
4359 if (port == PORT_D)
4360 intel_encoder->crtc_mask = 1 << 2;
4361 else
4362 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
4363 } else {
4364 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
4365 }
4366 intel_encoder->cloneable = 0;
4367 intel_encoder->hot_plug = intel_dp_hot_plug;
4368
4369 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4370 drm_encoder_cleanup(encoder);
4371 kfree(intel_dig_port);
4372 kfree(intel_connector);
4373 }
4374 }