drm/i915/dp: move edp vdd enable/disable at a lower level in i2c-over-aux
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_crtc.h>
33 #include <drm/drm_crtc_helper.h>
34 #include <drm/drm_edid.h>
35 #include "intel_drv.h"
36 #include <drm/i915_drm.h>
37 #include "i915_drv.h"
38
39 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40
/*
 * Static DPLL divider presets for the two standard DP link rates
 * (1.62 GHz and 2.7 GHz), picked per platform in intel_dp_set_clock().
 */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code this entry applies to */
	struct dpll dpll;	/* divider values programmed into the DPLL */
};

/* Gen4 (G4X) DPLL dividers */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH-split platform DPLL dividers */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL dividers */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
66
67 /**
68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
69 * @intel_dp: DP struct
70 *
71 * If a CPU or PCH DP output is attached to an eDP panel, this function
72 * will return true, and false otherwise.
73 */
74 static bool is_edp(struct intel_dp *intel_dp)
75 {
76 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
77
78 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
79 }
80
81 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
82 {
83 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
84
85 return intel_dig_port->base.base.dev;
86 }
87
88 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
89 {
90 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
91 }
92
/* Forward declarations for helpers defined later in this file. */
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
97
98 static int
99 intel_dp_max_link_bw(struct intel_dp *intel_dp)
100 {
101 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
102 struct drm_device *dev = intel_dp->attached_connector->base.dev;
103
104 switch (max_link_bw) {
105 case DP_LINK_BW_1_62:
106 case DP_LINK_BW_2_7:
107 break;
108 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
109 if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
110 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
111 max_link_bw = DP_LINK_BW_5_4;
112 else
113 max_link_bw = DP_LINK_BW_2_7;
114 break;
115 default:
116 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
117 max_link_bw);
118 max_link_bw = DP_LINK_BW_1_62;
119 break;
120 }
121 return max_link_bw;
122 }
123
124 /*
125 * The units on the numbers in the next two are... bizarre. Examples will
126 * make it clearer; this one parallels an example in the eDP spec.
127 *
128 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
129 *
130 * 270000 * 1 * 8 / 10 == 216000
131 *
132 * The actual data capacity of that configuration is 2.16Gbit/s, so the
133 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
134 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
135 * 119000. At 18bpp that's 2142000 kilobits per second.
136 *
137 * Thus the strange-looking division by 10 in intel_dp_link_required, to
138 * get the result in decakilobits instead of kilobits.
139 */
140
/*
 * Link bandwidth needed to carry @pixel_clock (kHz) at @bpp bits per
 * pixel, in decakilobits/s (see the unit discussion above); rounds up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* round up to the next decakilobit */
	return (kilobits + 9) / 10;
}
146
/*
 * Usable payload rate for @max_lanes lanes at @max_link_clock (kHz
 * symbol rate), in decakilobits/s: 8b/10b coding leaves 8/10 of the
 * raw bandwidth for data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
152
/* Reject modes the sink/panel cannot carry over the configured DP link. */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		/* eDP panels can only scale down: reject larger-than-native */
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* the link always runs at the panel's fixed mode clock */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* check against 18bpp, the lowest bpp the pipe can fall back to */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
190
/*
 * Pack up to the first four bytes of @src, MSB first, into one 32-bit
 * word in the layout the AUX channel data registers expect.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i, n = src_bytes > 4 ? 4 : src_bytes;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (8 * (3 - i));

	return v;
}
203
/* Unpack a 32-bit AUX data register value into up to four bytes, MSB first. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
213
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	/* decode the FSB strap bits into a frequency in MHz */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
247
/* Panel power sequencer setup, defined later in this file. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *out);
256
/*
 * Figure out which pipe's panel power sequencer drives this eDP port on
 * VLV: use the attached crtc's pipe when one exists (modeset path),
 * otherwise scan the PP registers for a pipe already selecting this port.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	enum pipe pipe;

	/* modeset should have pipe */
	if (crtc)
		return to_intel_crtc(crtc)->pipe;

	/* init time, try to find a pipe with this port selected */
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
			return pipe;
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
			return pipe;
	}

	/* shrug */
	return PIPE_A;
}
284
285 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
286 {
287 struct drm_device *dev = intel_dp_to_dev(intel_dp);
288
289 if (HAS_PCH_SPLIT(dev))
290 return PCH_PP_CONTROL;
291 else
292 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
293 }
294
295 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
296 {
297 struct drm_device *dev = intel_dp_to_dev(intel_dp);
298
299 if (HAS_PCH_SPLIT(dev))
300 return PCH_PP_STATUS;
301 else
302 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
303 }
304
305 static bool edp_have_panel_power(struct intel_dp *intel_dp)
306 {
307 struct drm_device *dev = intel_dp_to_dev(intel_dp);
308 struct drm_i915_private *dev_priv = dev->dev_private;
309
310 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
311 }
312
313 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
314 {
315 struct drm_device *dev = intel_dp_to_dev(intel_dp);
316 struct drm_i915_private *dev_priv = dev->dev_private;
317
318 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
319 }
320
321 static void
322 intel_dp_check_edp(struct intel_dp *intel_dp)
323 {
324 struct drm_device *dev = intel_dp_to_dev(intel_dp);
325 struct drm_i915_private *dev_priv = dev->dev_private;
326
327 if (!is_edp(intel_dp))
328 return;
329
330 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
331 WARN(1, "eDP powered off while attempting aux channel communication.\n");
332 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
333 I915_READ(_pp_stat_reg(intel_dp)),
334 I915_READ(_pp_ctrl_reg(intel_dp)));
335 }
336 }
337
/*
 * Wait for the in-flight AUX transfer to finish (SEND_BUSY to clear),
 * either via the AUX-done interrupt or by polling, capped at 10ms.
 * Returns the final AUX_CH_CTL status value.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the control register and tests whether the busy bit cleared */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
361
362 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
363 {
364 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
365 struct drm_device *dev = intel_dig_port->base.base.dev;
366
367 /*
368 * The clock divider is based off the hrawclk, and would like to run at
369 * 2MHz. So, take the hrawclk value and divide by 2 and use that
370 */
371 return index ? 0 : intel_hrawclk(dev) / 2;
372 }
373
374 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
375 {
376 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
377 struct drm_device *dev = intel_dig_port->base.base.dev;
378
379 if (index)
380 return 0;
381
382 if (intel_dig_port->port == PORT_A) {
383 if (IS_GEN6(dev) || IS_GEN7(dev))
384 return 200; /* SNB & IVB eDP input clock at 400Mhz */
385 else
386 return 225; /* eDP input clock at 450Mhz */
387 } else {
388 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
389 }
390 }
391
/* HSW+ AUX clock divider: depends on the port and the PCH flavour. */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		/* CPU eDP: derive the divider from the current cdclk (kHz) */
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
413
/* VLV uses a fixed AUX clock divider of 100; only index 0 is valid. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
418
/*
 * Build the AUX_CH_CTL value that starts a transfer of @send_bytes on
 * gen4-gen8 style hardware, including precharge time and timeout.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* precharge time, in units of 2us */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW eDP (the DPA AUX channel) needs the longer 600us timeout */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
448
/*
 * Run one raw transaction on this port's AUX channel: hand @send to the
 * hardware, wait for completion and copy any reply into @recv.  Returns
 * the number of reply bytes or a negative errno.  On eDP the VDD
 * override is forced on for the duration of the transfer and released
 * again afterwards if this call was the one that enabled it.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;	/* true if we enabled VDD here and must disable it below */

	vdd = _edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* try each supported AUX clock divider until the transfer succeeds */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	return ret;
}
577
/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;
	int retry;

	/* the 4-byte header plus payload must fit in the 20-byte message */
	if (WARN_ON(send_bytes > 16))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	/* native-write header: command, address high/low, length - 1 */
	msg[0] = DP_AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	/* retry up to 7 times on AUX_DEFER, per the DP spec */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		ack >>= 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
			return send_bytes;
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}
615
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	uint8_t buf = byte;

	return intel_dp_aux_native_write(intel_dp, address, &buf, 1);
}
623
/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];	/* 1 reply-code byte + up to 19 data bytes */
	int reply_bytes;
	uint8_t ack;
	int ret;
	int retry;

	/* the local reply buffer can hold at most 19 data bytes */
	if (WARN_ON(recv_bytes > 19))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	/* native-read header: command, address high/low, length - 1 */
	msg[0] = DP_AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	/* retry up to 7 times on AUX_DEFER, per the DP spec */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0] >> 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
			/* strip the reply-code byte, hand back the payload */
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}
670
/*
 * i2c-over-AUX transfer callback for the i2c_dp_aux algorithm: moves a
 * single byte (or an address-only transaction) through the AUX channel,
 * honouring the DP1.2 defer/retry rules.  Returns the number of data
 * bytes in the reply, or a negative errno.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						 struct intel_dp,
						 adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = DP_AUX_I2C_READ << 4;
	else
		msg[0] = DP_AUX_I2C_WRITE << 4;

	/* keep Middle-Of-Transaction set until the i2c STOP */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= DP_AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* address-only (start/stop) transaction: header only */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		/* check the native AUX reply field first */
		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case DP_AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		/* then the i2c-over-AUX reply field */
		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
		case DP_AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = reply_bytes - 1;
			goto out;
		case DP_AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	return ret;
}
790
791 static void
792 intel_dp_connector_unregister(struct intel_connector *intel_connector)
793 {
794 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
795
796 sysfs_remove_link(&intel_connector->base.kdev->kobj,
797 intel_dp->adapter.dev.kobj.name);
798 intel_connector_unregister(intel_connector);
799 }
800
/*
 * Register the i2c-over-AUX adapter for this DP port and link it into
 * the connector's sysfs directory.  Returns 0 or a negative errno.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	/* strncpy does not terminate on truncation, so force the NUL */
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;

	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	if (ret < 0)
		return ret;

	ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
				&intel_dp->adapter.dev.kobj,
				intel_dp->adapter.dev.kobj.name);

	/* undo the bus registration if the sysfs link could not be made */
	if (ret < 0)
		i2c_del_adapter(&intel_dp->adapter);

	return ret;
}
833
834 static void
835 intel_dp_set_clock(struct intel_encoder *encoder,
836 struct intel_crtc_config *pipe_config, int link_bw)
837 {
838 struct drm_device *dev = encoder->base.dev;
839 const struct dp_link_dpll *divisor = NULL;
840 int i, count = 0;
841
842 if (IS_G4X(dev)) {
843 divisor = gen4_dpll;
844 count = ARRAY_SIZE(gen4_dpll);
845 } else if (IS_HASWELL(dev)) {
846 /* Haswell has special-purpose DP DDI clocks. */
847 } else if (HAS_PCH_SPLIT(dev)) {
848 divisor = pch_dpll;
849 count = ARRAY_SIZE(pch_dpll);
850 } else if (IS_VALLEYVIEW(dev)) {
851 divisor = vlv_dpll;
852 count = ARRAY_SIZE(vlv_dpll);
853 }
854
855 if (divisor && count) {
856 for (i = 0; i < count; i++) {
857 if (link_bw == divisor[i].link_bw) {
858 pipe_config->dpll = divisor[i].dpll;
859 pipe_config->clock_set = true;
860 break;
861 }
862 }
863 }
864 }
865
/*
 * Pick a (bpp, lane count, link rate) combination for the requested
 * mode, preferring the highest usable bpp and the cheapest link config
 * that can still carry it.  Fills the link parameters and m/n values in
 * @pipe_config; returns false when the mode cannot be carried at all.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
	int bpp, mode_rate;
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		/* eDP always drives the panel's fixed mode, scaling if needed */
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    dev_priv->vbt.edp_bpp < bpp) {
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
			      dev_priv->vbt.edp_bpp);
		bpp = dev_priv->vbt.edp_bpp;
	}

	/* search from high bpp down to 6bpc, cheapest link config first */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			for (clock = 0; clock <= max_clock; clock++) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
974
/* Program the CPU eDP PLL (DP_A) frequency select for the target link clock. */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* let the PLL settle before anything depends on the new frequency */
	udelay(500);
}
1004
/*
 * Assemble the DP port register value (intel_dp->DP) for the current
 * mode, handling the three register layout families, and set up the
 * CPU eDP PLL where needed.  The value is written out later during
 * enable.
 */
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style link training bits, pipe in bits 29+ */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU layout */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		/* CPT PCH: most config lives in TRANS_DP_CTL instead */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}
1082
/* Panel power sequencer status (PP_STATUS) mask/value pairs consumed by
 * wait_panel_status(): we wait until (PP_STATUS & MASK) == VALUE. The
 * literal 0 terms keep the four bit-field positions visually aligned
 * across the three pairs. */

/* Fully on: power on, no sequence in progress, state machine in ON idle. */
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

/* Fully off: power off and no sequence running (state field ignored). */
#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)

/* Power-cycle delay elapsed: off, idle, and the cycle-delay timer done. */
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1091
/*
 * Poll the panel power sequencer status register until the bits selected
 * by @mask read back as @value. A timeout (the 5000/10 arguments to
 * _wait_for()) is logged as an error but is otherwise non-fatal.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	/* Register offsets differ per platform/port; resolve them once. */
	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		/* Dump both registers to aid debugging a stuck sequencer. */
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1116
/* Block until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1122
/* Block until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1128
/*
 * Block until the panel power-cycle delay has elapsed, i.e. the panel may
 * be powered up again after having been powered down.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1140
/* Honour the panel's power-on -> backlight-on delay (avoids flicker). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1146
/* Honour the backlight-off -> panel-power-off delay from the panel specs. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1152
1153 /* Read the current pp_control value, unlocking the register if it
1154 * is locked
1155 */
1156
1157 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1158 {
1159 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1160 struct drm_i915_private *dev_priv = dev->dev_private;
1161 u32 control;
1162
1163 control = I915_READ(_pp_ctrl_reg(intel_dp));
1164 control &= ~PANEL_UNLOCK_MASK;
1165 control |= PANEL_UNLOCK_REGS;
1166 return control;
1167 }
1168
/*
 * Force panel VDD on so the AUX channel and panel can be accessed without
 * a full panel power-up.
 *
 * Returns true when this call took a fresh VDD reference (i.e. the caller
 * must balance it by dropping want_panel_vdd later); false for non-eDP
 * ports. Note the return value is computed from want_panel_vdd, not from
 * the hardware state.
 */
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	if (!is_edp(intel_dp))
		return false;

	intel_dp->want_panel_vdd = true;

	/* Hardware already has VDD forced on: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Balanced by intel_runtime_pm_put() in edp_panel_vdd_off_sync(). */
	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	/* Respect the panel power-cycle delay before re-applying power. */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1212
1213 static void edp_panel_vdd_on(struct intel_dp *intel_dp)
1214 {
1215 if (is_edp(intel_dp)) {
1216 bool vdd = _edp_panel_vdd_on(intel_dp);
1217
1218 WARN(!vdd, "eDP VDD already requested on\n");
1219 }
1220 }
1221
/*
 * Actually turn the VDD override off, provided nobody holds a VDD
 * reference anymore and the hardware still has it forced on. Must be
 * called with the mode_config mutex held (asserted below) since it is
 * reached both from normal modeset paths and from the delayed work.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

		/* If panel power was also off, dropping VDD starts the
		 * power-cycle delay; record when it began. */
		if ((pp & POWER_TARGET_ON) == 0)
			intel_dp->last_power_cycle = jiffies;

		/* Pairs with intel_runtime_pm_get() in _edp_panel_vdd_on(). */
		intel_runtime_pm_put(dev_priv);
	}
}
1253
/*
 * Delayed-work callback that drops the VDD override after the grace
 * period scheduled by edp_panel_vdd_off(). Takes the mode_config mutex
 * because edp_panel_vdd_off_sync() requires it held.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edp_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}
1264
1265 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1266 {
1267 if (!is_edp(intel_dp))
1268 return;
1269
1270 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1271
1272 intel_dp->want_panel_vdd = false;
1273
1274 if (sync) {
1275 edp_panel_vdd_off_sync(intel_dp);
1276 } else {
1277 /*
1278 * Queue the timer to fire a long
1279 * time from now (relative to the power down delay)
1280 * to keep the panel power up across a sequence of operations
1281 */
1282 schedule_delayed_work(&intel_dp->panel_vdd_work,
1283 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1284 }
1285 }
1286
/*
 * Turn the eDP panel power on via the power sequencer, honouring the
 * power-cycle delay and waiting for the sequencer to report the panel on.
 * No-op on non-eDP ports or if the panel is already powered.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used by wait_backlight_on() for the T8 style delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1331
/*
 * Turn the eDP panel power off, first honouring the backlight-off delay,
 * then waiting for the sequencer to report the panel off. No-op on
 * non-eDP ports.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	edp_wait_backlight_off(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Start of the power-cycle delay, consumed by wait_panel_power_cycle(). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);
}
1360
/*
 * Enable the eDP backlight: set the sequencer's BLC enable bit and then
 * enable the panel backlight control. No-op on non-eDP ports.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(intel_dp->attached_connector);
}
1390
/*
 * Disable the eDP backlight (panel control first, then the sequencer's
 * BLC enable bit) and record the timestamp used to honour the
 * backlight-off -> power-off delay. No-op on non-eDP ports.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(intel_dp->attached_connector);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	/* Consumed by edp_wait_backlight_off(). */
	intel_dp->last_backlight_off = jiffies;
}
1413
/*
 * Enable the CPU eDP PLL (DP_A). Must be called with the pipe disabled;
 * the 200us settle delay is required before the port may be enabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
1439
/*
 * Disable the CPU eDP PLL (DP_A). Must be called with the pipe disabled
 * and the port already off; waits 200us for the PLL to settle.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
1464
1465 /* If the sink supports it, try to set the power state appropriately */
1466 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1467 {
1468 int ret, i;
1469
1470 /* Should have a valid DPCD by this point */
1471 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1472 return;
1473
1474 if (mode != DRM_MODE_DPMS_ON) {
1475 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1476 DP_SET_POWER_D3);
1477 if (ret != 1)
1478 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1479 } else {
1480 /*
1481 * When turning on, we need to retry for 1ms to give the sink
1482 * time to wake up.
1483 */
1484 for (i = 0; i < 3; i++) {
1485 ret = intel_dp_aux_native_write_1(intel_dp,
1486 DP_SET_POWER,
1487 DP_SET_POWER_D0);
1488 if (ret == 1)
1489 break;
1490 msleep(1);
1491 }
1492 }
1493 }
1494
/*
 * Read back whether this DP port is enabled and, if so, which pipe is
 * driving it. Returns false when the port's power domain is off or the
 * port-enable bit is clear; otherwise returns true with *pipe set (on
 * CPT the pipe is recovered from the TRANS_DP_CTL port-select fields).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/* Reading the register with the power well off would be bogus. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP encodes the pipe CPT-style. */
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: the pipe association lives in TRANS_DP_CTL, so scan
		 * each transcoder for our port-select value. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			/* Unknown register: report enabled without a pipe. */
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
1551
/*
 * Fill @pipe_config from the hardware state for state readout/checking:
 * sync polarity flags, link M/N, port clock (for CPU eDP) and the derived
 * dot clock. Also fixes up a bogus VBT eDP bpp if the BIOS programmed a
 * higher bpp than the VBT claims as maximum.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	/* Sync polarities live in the port register, except on CPT where
	 * they moved to TRANS_DP_CTL. */
	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP: recover the link rate from the DP_A PLL frequency. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
1628
/* Whether the attached sink advertised PSR support in its DPCD. */
static bool is_edp_psr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->psr.sink_support;
}
1635
1636 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1637 {
1638 struct drm_i915_private *dev_priv = dev->dev_private;
1639
1640 if (!HAS_PSR(dev))
1641 return false;
1642
1643 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1644 }
1645
/*
 * Program the VSC SDP used for PSR into the transcoder's video DIP data
 * buffer, zero-padding the buffer beyond the packet, then enable the VSC
 * DIP. The DIP must be disabled while its data registers are rewritten.
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	/* Copy the packet a dword at a time; pad the rest with zeros. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
1674
/*
 * One-time PSR setup: program the VSC SDP header and mask off the events
 * (memory-up, hotplug, LPSP) that would otherwise cause continuous PSR
 * exits. Guarded by psr_setup_done so it only runs once per device.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	intel_dp->psr_setup_done = true;
}
1698
/*
 * Enable PSR on the sink via DPCD and program the hardware's dedicated
 * PSR AUX registers with the command the hardware will replay on exit.
 * The main-link-active bit is set unless the sink can retrain-free exit
 * (DP_PSR_NO_TRAIN_ON_EXIT).
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	int msg_size = 5; /* Header(4) + Message(1) */

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE &
					    ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE |
					    DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
1728
1729 static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1730 {
1731 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1732 struct drm_i915_private *dev_priv = dev->dev_private;
1733 uint32_t max_sleep_time = 0x1f;
1734 uint32_t idle_frames = 1;
1735 uint32_t val = 0x0;
1736 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
1737
1738 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1739 val |= EDP_PSR_LINK_STANDBY;
1740 val |= EDP_PSR_TP2_TP3_TIME_0us;
1741 val |= EDP_PSR_TP1_TIME_0us;
1742 val |= EDP_PSR_SKIP_AUX_EXIT;
1743 } else
1744 val |= EDP_PSR_LINK_DISABLE;
1745
1746 I915_WRITE(EDP_PSR_CTL(dev), val |
1747 IS_BROADWELL(dev) ? 0 : link_entry_time |
1748 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1749 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1750 EDP_PSR_ENABLE);
1751 }
1752
1753 static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1754 {
1755 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1756 struct drm_device *dev = dig_port->base.base.dev;
1757 struct drm_i915_private *dev_priv = dev->dev_private;
1758 struct drm_crtc *crtc = dig_port->base.base.crtc;
1759 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1760 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1761 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1762
1763 dev_priv->psr.source_ok = false;
1764
1765 if (!HAS_PSR(dev)) {
1766 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1767 return false;
1768 }
1769
1770 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1771 (dig_port->port != PORT_A)) {
1772 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1773 return false;
1774 }
1775
1776 if (!i915.enable_psr) {
1777 DRM_DEBUG_KMS("PSR disable by flag\n");
1778 return false;
1779 }
1780
1781 crtc = dig_port->base.base.crtc;
1782 if (crtc == NULL) {
1783 DRM_DEBUG_KMS("crtc not active for PSR\n");
1784 return false;
1785 }
1786
1787 intel_crtc = to_intel_crtc(crtc);
1788 if (!intel_crtc_active(crtc)) {
1789 DRM_DEBUG_KMS("crtc not active for PSR\n");
1790 return false;
1791 }
1792
1793 obj = to_intel_framebuffer(crtc->fb)->obj;
1794 if (obj->tiling_mode != I915_TILING_X ||
1795 obj->fence_reg == I915_FENCE_REG_NONE) {
1796 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1797 return false;
1798 }
1799
1800 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1801 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1802 return false;
1803 }
1804
1805 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1806 S3D_ENABLE) {
1807 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1808 return false;
1809 }
1810
1811 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1812 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1813 return false;
1814 }
1815
1816 dev_priv->psr.source_ok = true;
1817 return true;
1818 }
1819
/*
 * Perform the actual PSR enable sequence: one-time setup, then sink, then
 * source. Bails out if conditions aren't met or PSR is already enabled.
 * Note match_conditions is evaluated first (it updates psr.source_ok as a
 * side effect), matching the original short-circuit order.
 */
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;

	if (intel_edp_is_psr_enabled(dev))
		return;

	/* Setup PSR once */
	intel_edp_psr_setup(intel_dp);

	/* Enable PSR on the panel */
	intel_edp_psr_enable_sink(intel_dp);

	/* Enable PSR on the host */
	intel_edp_psr_enable_source(intel_dp);
}
1837
/*
 * Public PSR enable entry point: enable PSR if all conditions are met and
 * it isn't already active. match_conditions runs first so psr.source_ok
 * is refreshed, exactly as the short-circuit && evaluated before.
 */
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;

	if (intel_edp_is_psr_enabled(dev))
		return;

	intel_edp_psr_do_enable(intel_dp);
}
1846
/*
 * Disable source-side PSR and wait (up to 2s) for the PSR state machine
 * to go idle. No-op if PSR is not currently enabled.
 */
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_edp_is_psr_enabled(dev))
		return;

	I915_WRITE(EDP_PSR_CTL(dev),
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

	/* Wait till PSR is idle */
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
1863
/*
 * Re-evaluate PSR for the device's eDP encoder: disable it if conditions
 * no longer hold, enable it if they now do. Bails out of the whole
 * function (not just the loop iteration) as soon as an eDP encoder is
 * found whose sink doesn't support PSR — sink support is device-wide.
 */
void intel_edp_psr_update(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
		if (encoder->type == INTEL_OUTPUT_EDP) {
			intel_dp = enc_to_intel_dp(&encoder->base);

			if (!is_edp_psr(dev))
				return;

			if (!intel_edp_psr_match_conditions(intel_dp))
				intel_edp_psr_disable(intel_dp);
			else
				if (!intel_edp_is_psr_enabled(dev))
					intel_edp_psr_do_enable(intel_dp);
		}
}
1883
/*
 * Encoder disable hook: shut down backlight, sink and panel power while
 * holding a VDD reference so the panel stays accessible across the
 * sequence, then take the link down (except CPU eDP / VLV, which must do
 * that only after the pipe is off — see intel_post_disable_dp()).
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
1902
1903 static void intel_post_disable_dp(struct intel_encoder *encoder)
1904 {
1905 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1906 enum port port = dp_to_dig_port(intel_dp)->port;
1907 struct drm_device *dev = encoder->base.dev;
1908
1909 if (port == PORT_A || IS_VALLEYVIEW(dev)) {
1910 intel_dp_link_down(intel_dp);
1911 if (!IS_VALLEYVIEW(dev))
1912 ironlake_edp_pll_off(intel_dp);
1913 }
1914 }
1915
/*
 * Common DP enable path: wake the sink, run link training, and power the
 * eDP panel on — all under a temporary VDD reference so AUX works before
 * full panel power. Warns and bails if the port is unexpectedly already
 * enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_edp_panel_on(intel_dp);
	/* VDD may be dropped once real panel power is on. */
	edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
}
1934
/* g4x/ilk enable hook: full enable sequence, then backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
1942
/* VLV enable hook: only the backlight — the port itself was already
 * enabled in vlv_pre_enable_dp(). */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
}
1949
/* Pre-enable hook: CPU eDP (port A) needs its dedicated PLL on before
 * the pipe is enabled. */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	if (dport->port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
1958
/*
 * VLV pre-enable: program the DPIO PHY lane registers for this channel,
 * initialize the eDP power sequencer for this pipe/port, enable the port
 * and wait for the PHY to report ready.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	struct edp_power_seq power_seq;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* NOTE(review): the value read from VLV_PCS01_DW8 is immediately
	 * discarded by the following assignment; presumably only the read
	 * cycle itself matters here — confirm against VLV DPIO docs. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	if (is_edp(intel_dp)) {
		/* init power sequencer on this pipe and port */
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);
	}

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}
1997
/*
 * VLV pre-PLL-enable: reset the PHY Tx lanes to their default state and
 * apply the inter-pair skew workaround, all under the DPIO lock.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
		       DPIO_PCS_TX_LANE2_RESET |
		       DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
		       DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
2025
2026 /*
2027 * Native read with retry for link status and receiver capability reads for
2028 * cases where the sink may still be asleep.
2029 */
2030 static bool
2031 intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
2032 uint8_t *recv, int recv_bytes)
2033 {
2034 int ret, i;
2035
2036 /*
2037 * Sinks are *supposed* to come up within 1ms from an off state,
2038 * but we're also supposed to retry 3 times per the spec.
2039 */
2040 for (i = 0; i < 3; i++) {
2041 ret = intel_dp_aux_native_read(intel_dp, address, recv,
2042 recv_bytes);
2043 if (ret == recv_bytes)
2044 return true;
2045 msleep(1);
2046 }
2047
2048 return false;
2049 }
2050
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	/* Retrying read: the sink may still be waking up. */
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}
2063
/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

/* Maximum supported voltage-swing level for this platform/port. The
 * branch order matters: the conditions overlap (e.g. a GEN7 CPT part
 * with port A must hit the GEN7 case, not the CPT case). */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}
2084
2085 static uint8_t
2086 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2087 {
2088 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2089 enum port port = dp_to_dig_port(intel_dp)->port;
2090
2091 if (IS_BROADWELL(dev)) {
2092 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2093 case DP_TRAIN_VOLTAGE_SWING_400:
2094 case DP_TRAIN_VOLTAGE_SWING_600:
2095 return DP_TRAIN_PRE_EMPHASIS_6;
2096 case DP_TRAIN_VOLTAGE_SWING_800:
2097 return DP_TRAIN_PRE_EMPHASIS_3_5;
2098 case DP_TRAIN_VOLTAGE_SWING_1200:
2099 default:
2100 return DP_TRAIN_PRE_EMPHASIS_0;
2101 }
2102 } else if (IS_HASWELL(dev)) {
2103 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2104 case DP_TRAIN_VOLTAGE_SWING_400:
2105 return DP_TRAIN_PRE_EMPHASIS_9_5;
2106 case DP_TRAIN_VOLTAGE_SWING_600:
2107 return DP_TRAIN_PRE_EMPHASIS_6;
2108 case DP_TRAIN_VOLTAGE_SWING_800:
2109 return DP_TRAIN_PRE_EMPHASIS_3_5;
2110 case DP_TRAIN_VOLTAGE_SWING_1200:
2111 default:
2112 return DP_TRAIN_PRE_EMPHASIS_0;
2113 }
2114 } else if (IS_VALLEYVIEW(dev)) {
2115 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2116 case DP_TRAIN_VOLTAGE_SWING_400:
2117 return DP_TRAIN_PRE_EMPHASIS_9_5;
2118 case DP_TRAIN_VOLTAGE_SWING_600:
2119 return DP_TRAIN_PRE_EMPHASIS_6;
2120 case DP_TRAIN_VOLTAGE_SWING_800:
2121 return DP_TRAIN_PRE_EMPHASIS_3_5;
2122 case DP_TRAIN_VOLTAGE_SWING_1200:
2123 default:
2124 return DP_TRAIN_PRE_EMPHASIS_0;
2125 }
2126 } else if (IS_GEN7(dev) && port == PORT_A) {
2127 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2128 case DP_TRAIN_VOLTAGE_SWING_400:
2129 return DP_TRAIN_PRE_EMPHASIS_6;
2130 case DP_TRAIN_VOLTAGE_SWING_600:
2131 case DP_TRAIN_VOLTAGE_SWING_800:
2132 return DP_TRAIN_PRE_EMPHASIS_3_5;
2133 default:
2134 return DP_TRAIN_PRE_EMPHASIS_0;
2135 }
2136 } else {
2137 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2138 case DP_TRAIN_VOLTAGE_SWING_400:
2139 return DP_TRAIN_PRE_EMPHASIS_6;
2140 case DP_TRAIN_VOLTAGE_SWING_600:
2141 return DP_TRAIN_PRE_EMPHASIS_6;
2142 case DP_TRAIN_VOLTAGE_SWING_800:
2143 return DP_TRAIN_PRE_EMPHASIS_3_5;
2144 case DP_TRAIN_VOLTAGE_SWING_1200:
2145 default:
2146 return DP_TRAIN_PRE_EMPHASIS_0;
2147 }
2148 }
2149 }
2150
/*
 * Program the VLV DPIO PHY with demph / uniqtranscale / pre-emphasis
 * register values matching the swing and pre-emphasis requested in
 * train_set[0].
 *
 * Returns 0 in all cases: on VLV the levels are applied directly to the
 * PHY here, so there is nothing for the caller to merge into the DP port
 * register.  An unsupported swing/pre-emphasis combination also returns 0,
 * without touching the hardware.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/*
	 * Pick the magic PHY constants for the requested pre-emphasis /
	 * swing pair.  Only the combinations listed below are valid.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Commit the values to the PHY under the DPIO lock.
	 * NOTE(review): TX_DW5 is written 0 before and 0x80000000 after the
	 * update — presumably gating the lane while reprogramming; confirm
	 * against the VLV DPIO documentation.
	 */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2250
2251 static void
2252 intel_get_adjust_train(struct intel_dp *intel_dp,
2253 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2254 {
2255 uint8_t v = 0;
2256 uint8_t p = 0;
2257 int lane;
2258 uint8_t voltage_max;
2259 uint8_t preemph_max;
2260
2261 for (lane = 0; lane < intel_dp->lane_count; lane++) {
2262 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2263 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2264
2265 if (this_v > v)
2266 v = this_v;
2267 if (this_p > p)
2268 p = this_p;
2269 }
2270
2271 voltage_max = intel_dp_voltage_max(intel_dp);
2272 if (v >= voltage_max)
2273 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2274
2275 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2276 if (p >= preemph_max)
2277 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2278
2279 for (lane = 0; lane < 4; lane++)
2280 intel_dp->train_set[lane] = v | p;
2281 }
2282
2283 static uint32_t
2284 intel_gen4_signal_levels(uint8_t train_set)
2285 {
2286 uint32_t signal_levels = 0;
2287
2288 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2289 case DP_TRAIN_VOLTAGE_SWING_400:
2290 default:
2291 signal_levels |= DP_VOLTAGE_0_4;
2292 break;
2293 case DP_TRAIN_VOLTAGE_SWING_600:
2294 signal_levels |= DP_VOLTAGE_0_6;
2295 break;
2296 case DP_TRAIN_VOLTAGE_SWING_800:
2297 signal_levels |= DP_VOLTAGE_0_8;
2298 break;
2299 case DP_TRAIN_VOLTAGE_SWING_1200:
2300 signal_levels |= DP_VOLTAGE_1_2;
2301 break;
2302 }
2303 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2304 case DP_TRAIN_PRE_EMPHASIS_0:
2305 default:
2306 signal_levels |= DP_PRE_EMPHASIS_0;
2307 break;
2308 case DP_TRAIN_PRE_EMPHASIS_3_5:
2309 signal_levels |= DP_PRE_EMPHASIS_3_5;
2310 break;
2311 case DP_TRAIN_PRE_EMPHASIS_6:
2312 signal_levels |= DP_PRE_EMPHASIS_6;
2313 break;
2314 case DP_TRAIN_PRE_EMPHASIS_9_5:
2315 signal_levels |= DP_PRE_EMPHASIS_9_5;
2316 break;
2317 }
2318 return signal_levels;
2319 }
2320
2321 /* Gen6's DP voltage swing and pre-emphasis control */
2322 static uint32_t
2323 intel_gen6_edp_signal_levels(uint8_t train_set)
2324 {
2325 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2326 DP_TRAIN_PRE_EMPHASIS_MASK);
2327 switch (signal_levels) {
2328 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2329 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2330 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2331 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2332 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2333 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2334 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2335 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2336 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2337 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2338 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2339 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2340 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2341 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2342 default:
2343 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2344 "0x%x\n", signal_levels);
2345 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2346 }
2347 }
2348
/*
 * Gen7's DP voltage swing and pre-emphasis control.
 *
 * Maps the DPCD swing/pre-emphasis in @train_set to the IVB eDP DDI
 * buffer translation select.  Unsupported combinations fall back to the
 * 500mV / 0dB entry (note: 500mV, not one of the DPCD swing values).
 */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
2379
/*
 * Gen7.5's (HSW) DP voltage swing and pre-emphasis control.
 *
 * Maps the DPCD swing/pre-emphasis in @train_set to the HSW DDI buffer
 * emphasis select.  Unsupported combinations fall back to 400mV / 0dB.
 */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
2413
/*
 * BDW's DP voltage swing and pre-emphasis control.
 *
 * Maps the DPCD swing/pre-emphasis in @train_set to the BDW DDI buffer
 * translation select (Sel0..Sel8).  Unsupported combinations fall back
 * to 400mV / 0dB (Sel0).
 */
static uint32_t
intel_bdw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_BDW;	/* Sel1 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_BDW;	/* Sel2 */

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_BDW;	/* Sel3 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_BDW;	/* Sel4 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_BDW;	/* Sel5 */

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_BDW;	/* Sel6 */
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_BDW;	/* Sel7 */

	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_1200MV_0DB_BDW;	/* Sel8 */

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	}
}
2448
2449 /* Properly updates "DP" with the correct signal levels. */
2450 static void
2451 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2452 {
2453 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2454 enum port port = intel_dig_port->port;
2455 struct drm_device *dev = intel_dig_port->base.base.dev;
2456 uint32_t signal_levels, mask;
2457 uint8_t train_set = intel_dp->train_set[0];
2458
2459 if (IS_BROADWELL(dev)) {
2460 signal_levels = intel_bdw_signal_levels(train_set);
2461 mask = DDI_BUF_EMP_MASK;
2462 } else if (IS_HASWELL(dev)) {
2463 signal_levels = intel_hsw_signal_levels(train_set);
2464 mask = DDI_BUF_EMP_MASK;
2465 } else if (IS_VALLEYVIEW(dev)) {
2466 signal_levels = intel_vlv_signal_levels(intel_dp);
2467 mask = 0;
2468 } else if (IS_GEN7(dev) && port == PORT_A) {
2469 signal_levels = intel_gen7_edp_signal_levels(train_set);
2470 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
2471 } else if (IS_GEN6(dev) && port == PORT_A) {
2472 signal_levels = intel_gen6_edp_signal_levels(train_set);
2473 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2474 } else {
2475 signal_levels = intel_gen4_signal_levels(train_set);
2476 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2477 }
2478
2479 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2480
2481 *DP = (*DP & ~mask) | signal_levels;
2482 }
2483
/*
 * Program the requested training pattern on the source side (DDI
 * DP_TP_CTL, CPT or gen4 DP port register variants), then tell the sink
 * about it via DP_TRAINING_PATTERN_SET (and DP_TRAINING_LANEx_SET when
 * a pattern is being enabled).
 *
 * @DP is the caller's cached DP port register value and is updated in
 * place on the non-DDI paths.  Returns true if the sink AUX write
 * transferred the expected number of bytes.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	if (HAS_DDI(dev)) {
		/* DDI: training pattern lives in DP_TP_CTL, not *DP. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT PCH ports use their own training-pattern encoding. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No TPS3 on this hardware; degrade to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* gen4-style DP port register. */
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No TPS3 on this hardware; degrade to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
					buf, len);

	return ret == len;
}
2580
/*
 * Restart training from scratch: zero the cached swing/pre-emphasis,
 * program the resulting (minimum) signal levels, and set the requested
 * training pattern.  Returns the result of intel_dp_set_link_train().
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			  uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
2589
2590 static bool
2591 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2592 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2593 {
2594 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2595 struct drm_device *dev = intel_dig_port->base.base.dev;
2596 struct drm_i915_private *dev_priv = dev->dev_private;
2597 int ret;
2598
2599 intel_get_adjust_train(intel_dp, link_status);
2600 intel_dp_set_signal_levels(intel_dp, DP);
2601
2602 I915_WRITE(intel_dp->output_reg, *DP);
2603 POSTING_READ(intel_dp->output_reg);
2604
2605 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
2606 intel_dp->train_set,
2607 intel_dp->lane_count);
2608
2609 return ret == intel_dp->lane_count;
2610 }
2611
/*
 * On DDI platforms, switch DP_TP_CTL into idle-pattern transmission and
 * (except on port A) wait for the hardware to report the idle pattern
 * was sent.  No-op on non-DDI hardware.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1ms timeout; failure is logged but otherwise not propagated. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
2642
/*
 * Enable the corresponding port and run the clock-recovery phase of link
 * training (training pattern 1).
 *
 * Writes the link configuration (bandwidth, lane count, 8b/10b) to the
 * sink, then loops: read link status, and either declare clock recovery
 * done, bump the swing/pre-emphasis as the sink requests, or give up
 * after 5 full resets / 5 same-voltage retries.  The resulting port
 * register value is cached in intel_dp->DP for the channel-EQ phase.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);

	/* No downspread, standard 8b/10b coding. */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery: start from zeroed swing/pre-emphasis */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;		/* force a mismatch on the first pass */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: full reset, up to 5 times. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
2733
/*
 * Run the channel-equalization phase of link training (pattern 2, or
 * pattern 3 for HBR2 / TPS3-capable sinks).
 *
 * Loops on link status: retrains clock recovery if it regressed, retries
 * equalization up to 5 times before falling back to a full link-down /
 * retrain, and gives up entirely after 5 clock-recovery restarts.  Ends
 * by switching DDI hardware to idle-pattern transmission and caching the
 * final port register value in intel_dp->DP.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2814
/* Take source and sink out of training mode (normal pixel transmission). */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
2820
/*
 * Tear down the DP link on non-DDI hardware: idle the training pattern,
 * apply the IBX transcoder-B workaround if needed, then disable the port
 * and wait out the panel power-down delay.  Intentionally a no-op on DDI
 * platforms (see the comment below).
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	/* Already down: nothing to do (but shout, since it's unexpected). */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the link into the idle training pattern before disabling. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Finally drop audio and the port enable bit. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2900
/*
 * Read and cache the sink's DPCD receiver capability block, plus the PSR
 * capability (eDP only), TPS3 support, and downstream port info.
 *
 * Returns false if the AUX transfer fails, the DPCD is absent (rev 0),
 * or the downstream port status fetch fails; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* 3 chars per byte for the hex dump ("xx " per byte). */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		/* Failure here is non-fatal; psr_dpcd stays zeroed. */
		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
					       intel_dp->psr_dpcd,
					       sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	/* Training Pattern 3 support (DPCD 1.2+ with TPS3 flag) */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported");
	} else
		intel_dp->use_tps3 = false;

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2955
2956 static void
2957 intel_dp_probe_oui(struct intel_dp *intel_dp)
2958 {
2959 u8 buf[3];
2960
2961 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2962 return;
2963
2964 edp_panel_vdd_on(intel_dp);
2965
2966 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2967 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2968 buf[0], buf[1], buf[2]);
2969
2970 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2971 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2972 buf[0], buf[1], buf[2]);
2973
2974 edp_panel_vdd_off(intel_dp, false);
2975 }
2976
/*
 * Read the sink-computed frame CRC via the DP_TEST_SINK mechanism.
 *
 * @crc: output buffer; 6 bytes are written (DP_TEST_CRC_R_CR onward).
 *
 * Returns 0 on success, -ENOTTY if the sink doesn't support CRC
 * calculation, or -EAGAIN on an AUX transfer failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf[1];

	if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
		return -EAGAIN;

	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
					 DP_TEST_SINK_START))
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
		return -EAGAIN;

	/* Stop the sink's CRC engine; result of the write is ignored. */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
	return 0;
}
3005
3006 static bool
3007 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3008 {
3009 int ret;
3010
3011 ret = intel_dp_aux_native_read_retry(intel_dp,
3012 DP_DEVICE_SERVICE_IRQ_VECTOR,
3013 sink_irq_vector, 1);
3014 if (!ret)
3015 return false;
3016
3017 return true;
3018 }
3019
/*
 * Respond to a sink automated-test request.  No test types are
 * implemented, so every request is answered with DP_TEST_NAK.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3026
3027 /*
3028 * According to DP spec
3029 * 5.1.2:
3030 * 1. Read DPCD
3031 * 2. Configure link according to Receiver Capabilities
3032 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3033 * 4. Check link status on receipt of hot-plug interrupt
3034 */
3035
3036 void
3037 intel_dp_check_link_status(struct intel_dp *intel_dp)
3038 {
3039 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3040 u8 sink_irq_vector;
3041 u8 link_status[DP_LINK_STATUS_SIZE];
3042
3043 if (!intel_encoder->connectors_active)
3044 return;
3045
3046 if (WARN_ON(!intel_encoder->base.crtc))
3047 return;
3048
3049 /* Try to read receiver status if the link appears to be up */
3050 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3051 return;
3052 }
3053
3054 /* Now read the DPCD to see if it's actually running */
3055 if (!intel_dp_get_dpcd(intel_dp)) {
3056 return;
3057 }
3058
3059 /* Try to read the source of the interrupt */
3060 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3061 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3062 /* Clear interrupt source */
3063 intel_dp_aux_native_write_1(intel_dp,
3064 DP_DEVICE_SERVICE_IRQ_VECTOR,
3065 sink_irq_vector);
3066
3067 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3068 intel_dp_handle_test_request(intel_dp);
3069 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3070 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3071 }
3072
3073 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3074 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3075 drm_get_encoder_name(&intel_encoder->base));
3076 intel_dp_start_link_train(intel_dp);
3077 intel_dp_complete_link_train(intel_dp);
3078 intel_dp_stop_link_train(intel_dp);
3079 }
3080 }
3081
3082 /* XXX this is probably wrong for multiple downstream ports */
3083 static enum drm_connector_status
3084 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3085 {
3086 uint8_t *dpcd = intel_dp->dpcd;
3087 uint8_t type;
3088
3089 if (!intel_dp_get_dpcd(intel_dp))
3090 return connector_status_disconnected;
3091
3092 /* if there's no downstream port, we're done */
3093 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3094 return connector_status_connected;
3095
3096 /* If we're HPD-aware, SINK_COUNT changes dynamically */
3097 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3098 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3099 uint8_t reg;
3100 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
3101 &reg, 1))
3102 return connector_status_unknown;
3103 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
3104 : connector_status_disconnected;
3105 }
3106
3107 /* If no HPD, poke DDC gently */
3108 if (drm_probe_ddc(&intel_dp->adapter))
3109 return connector_status_connected;
3110
3111 /* Well we tried, say unknown for unreliable port types */
3112 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3113 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3114 if (type == DP_DS_PORT_TYPE_VGA ||
3115 type == DP_DS_PORT_TYPE_NON_EDID)
3116 return connector_status_unknown;
3117 } else {
3118 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3119 DP_DWN_STRM_PORT_TYPE_MASK;
3120 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3121 type == DP_DWN_STRM_PORT_TYPE_OTHER)
3122 return connector_status_unknown;
3123 }
3124
3125 /* Anything else is out of spec, warn and ignore */
3126 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3127 return connector_status_disconnected;
3128 }
3129
3130 static enum drm_connector_status
3131 ironlake_dp_detect(struct intel_dp *intel_dp)
3132 {
3133 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3134 struct drm_i915_private *dev_priv = dev->dev_private;
3135 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3136 enum drm_connector_status status;
3137
3138 /* Can't disconnect eDP, but you can close the lid... */
3139 if (is_edp(intel_dp)) {
3140 status = intel_panel_detect(dev);
3141 if (status == connector_status_unknown)
3142 status = connector_status_connected;
3143 return status;
3144 }
3145
3146 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3147 return connector_status_disconnected;
3148
3149 return intel_dp_detect_dpcd(intel_dp);
3150 }
3151
3152 static enum drm_connector_status
3153 g4x_dp_detect(struct intel_dp *intel_dp)
3154 {
3155 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3156 struct drm_i915_private *dev_priv = dev->dev_private;
3157 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3158 uint32_t bit;
3159
3160 /* Can't disconnect eDP, but you can close the lid... */
3161 if (is_edp(intel_dp)) {
3162 enum drm_connector_status status;
3163
3164 status = intel_panel_detect(dev);
3165 if (status == connector_status_unknown)
3166 status = connector_status_connected;
3167 return status;
3168 }
3169
3170 if (IS_VALLEYVIEW(dev)) {
3171 switch (intel_dig_port->port) {
3172 case PORT_B:
3173 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3174 break;
3175 case PORT_C:
3176 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3177 break;
3178 case PORT_D:
3179 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3180 break;
3181 default:
3182 return connector_status_unknown;
3183 }
3184 } else {
3185 switch (intel_dig_port->port) {
3186 case PORT_B:
3187 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3188 break;
3189 case PORT_C:
3190 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3191 break;
3192 case PORT_D:
3193 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3194 break;
3195 default:
3196 return connector_status_unknown;
3197 }
3198 }
3199
3200 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3201 return connector_status_disconnected;
3202
3203 return intel_dp_detect_dpcd(intel_dp);
3204 }
3205
3206 static struct edid *
3207 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3208 {
3209 struct intel_connector *intel_connector = to_intel_connector(connector);
3210
3211 /* use cached edid if we have one */
3212 if (intel_connector->edid) {
3213 /* invalid edid */
3214 if (IS_ERR(intel_connector->edid))
3215 return NULL;
3216
3217 return drm_edid_duplicate(intel_connector->edid);
3218 }
3219
3220 return drm_get_edid(connector, adapter);
3221 }
3222
3223 static int
3224 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3225 {
3226 struct intel_connector *intel_connector = to_intel_connector(connector);
3227
3228 /* use cached edid if we have one */
3229 if (intel_connector->edid) {
3230 /* invalid edid */
3231 if (IS_ERR(intel_connector->edid))
3232 return 0;
3233
3234 return intel_connector_update_modes(connector,
3235 intel_connector->edid);
3236 }
3237
3238 return intel_ddc_get_modes(connector, adapter);
3239 }
3240
3241 static enum drm_connector_status
3242 intel_dp_detect(struct drm_connector *connector, bool force)
3243 {
3244 struct intel_dp *intel_dp = intel_attached_dp(connector);
3245 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3246 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3247 struct drm_device *dev = connector->dev;
3248 struct drm_i915_private *dev_priv = dev->dev_private;
3249 enum drm_connector_status status;
3250 enum intel_display_power_domain power_domain;
3251 struct edid *edid = NULL;
3252
3253 intel_runtime_pm_get(dev_priv);
3254
3255 power_domain = intel_display_port_power_domain(intel_encoder);
3256 intel_display_power_get(dev_priv, power_domain);
3257
3258 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3259 connector->base.id, drm_get_connector_name(connector));
3260
3261 intel_dp->has_audio = false;
3262
3263 if (HAS_PCH_SPLIT(dev))
3264 status = ironlake_dp_detect(intel_dp);
3265 else
3266 status = g4x_dp_detect(intel_dp);
3267
3268 if (status != connector_status_connected)
3269 goto out;
3270
3271 intel_dp_probe_oui(intel_dp);
3272
3273 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
3274 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3275 } else {
3276 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
3277 if (edid) {
3278 intel_dp->has_audio = drm_detect_monitor_audio(edid);
3279 kfree(edid);
3280 }
3281 }
3282
3283 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3284 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3285 status = connector_status_connected;
3286
3287 out:
3288 intel_display_power_put(dev_priv, power_domain);
3289
3290 intel_runtime_pm_put(dev_priv);
3291
3292 return status;
3293 }
3294
3295 static int intel_dp_get_modes(struct drm_connector *connector)
3296 {
3297 struct intel_dp *intel_dp = intel_attached_dp(connector);
3298 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3299 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3300 struct intel_connector *intel_connector = to_intel_connector(connector);
3301 struct drm_device *dev = connector->dev;
3302 struct drm_i915_private *dev_priv = dev->dev_private;
3303 enum intel_display_power_domain power_domain;
3304 int ret;
3305
3306 /* We should parse the EDID data and find out if it has an audio sink
3307 */
3308
3309 power_domain = intel_display_port_power_domain(intel_encoder);
3310 intel_display_power_get(dev_priv, power_domain);
3311
3312 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
3313 intel_display_power_put(dev_priv, power_domain);
3314 if (ret)
3315 return ret;
3316
3317 /* if eDP has no EDID, fall back to fixed mode */
3318 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3319 struct drm_display_mode *mode;
3320 mode = drm_mode_duplicate(dev,
3321 intel_connector->panel.fixed_mode);
3322 if (mode) {
3323 drm_mode_probed_add(connector, mode);
3324 return 1;
3325 }
3326 }
3327 return 0;
3328 }
3329
3330 static bool
3331 intel_dp_detect_audio(struct drm_connector *connector)
3332 {
3333 struct intel_dp *intel_dp = intel_attached_dp(connector);
3334 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3335 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3336 struct drm_device *dev = connector->dev;
3337 struct drm_i915_private *dev_priv = dev->dev_private;
3338 enum intel_display_power_domain power_domain;
3339 struct edid *edid;
3340 bool has_audio = false;
3341
3342 power_domain = intel_display_port_power_domain(intel_encoder);
3343 intel_display_power_get(dev_priv, power_domain);
3344
3345 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
3346 if (edid) {
3347 has_audio = drm_detect_monitor_audio(edid);
3348 kfree(edid);
3349 }
3350
3351 intel_display_power_put(dev_priv, power_domain);
3352
3353 return has_audio;
3354 }
3355
/*
 * drm .set_property hook for DP/eDP connectors.
 *
 * Handles three properties: force-audio, Broadcast RGB, and (eDP only)
 * the panel scaling mode.  A change that actually alters state falls
 * through to "done", which restores the crtc mode so the new setting
 * takes effect.  Returns 0 on success (including no-op updates) or a
 * negative error code.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object before acting on it. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO means: re-probe the sink's EDID for audio support. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset when nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the new property takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
3443
3444 static void
3445 intel_dp_connector_destroy(struct drm_connector *connector)
3446 {
3447 struct intel_connector *intel_connector = to_intel_connector(connector);
3448
3449 if (!IS_ERR_OR_NULL(intel_connector->edid))
3450 kfree(intel_connector->edid);
3451
3452 /* Can't call is_edp() since the encoder may have been destroyed
3453 * already. */
3454 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3455 intel_panel_fini(&intel_connector->panel);
3456
3457 drm_connector_cleanup(connector);
3458 kfree(connector);
3459 }
3460
3461 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3462 {
3463 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
3464 struct intel_dp *intel_dp = &intel_dig_port->dp;
3465 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3466
3467 i2c_del_adapter(&intel_dp->adapter);
3468 drm_encoder_cleanup(encoder);
3469 if (is_edp(intel_dp)) {
3470 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3471 mutex_lock(&dev->mode_config.mutex);
3472 edp_panel_vdd_off_sync(intel_dp);
3473 mutex_unlock(&dev->mode_config.mutex);
3474 }
3475 kfree(intel_dig_port);
3476 }
3477
/* drm connector vtable: detection, probing, property handling, teardown. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};

/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* Encoder vtable: teardown only; enable/disable hooks are set per-encoder
 * in intel_dp_init(). */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
3495
/* Hotplug handler: re-validate the link (and retrain if necessary). */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}
3503
3504 /* Return which DP Port should be selected for Transcoder DP control */
3505 int
3506 intel_trans_dp_port_sel(struct drm_crtc *crtc)
3507 {
3508 struct drm_device *dev = crtc->dev;
3509 struct intel_encoder *intel_encoder;
3510 struct intel_dp *intel_dp;
3511
3512 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3513 intel_dp = enc_to_intel_dp(&intel_encoder->base);
3514
3515 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3516 intel_encoder->type == INTEL_OUTPUT_EDP)
3517 return intel_dp->output_reg;
3518 }
3519
3520 return -1;
3521 }
3522
3523 /* check the VBT to see whether the eDP is on DP-D port */
3524 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3525 {
3526 struct drm_i915_private *dev_priv = dev->dev_private;
3527 union child_device_config *p_child;
3528 int i;
3529 static const short port_mapping[] = {
3530 [PORT_B] = PORT_IDPB,
3531 [PORT_C] = PORT_IDPC,
3532 [PORT_D] = PORT_IDPD,
3533 };
3534
3535 if (port == PORT_A)
3536 return true;
3537
3538 if (!dev_priv->vbt.child_dev_num)
3539 return false;
3540
3541 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3542 p_child = dev_priv->vbt.child_dev + i;
3543
3544 if (p_child->common.dvo_port == port_mapping[port] &&
3545 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3546 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3547 return true;
3548 }
3549 return false;
3550 }
3551
3552 static void
3553 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3554 {
3555 struct intel_connector *intel_connector = to_intel_connector(connector);
3556
3557 intel_attach_force_audio_property(connector);
3558 intel_attach_broadcast_rgb_property(connector);
3559 intel_dp->color_range_auto = true;
3560
3561 if (is_edp(intel_dp)) {
3562 drm_mode_create_scaling_mode_property(connector->dev);
3563 drm_object_attach_property(
3564 &connector->base,
3565 connector->dev->mode_config.scaling_mode_property,
3566 DRM_MODE_SCALE_ASPECT);
3567 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
3568 }
3569 }
3570
3571 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
3572 {
3573 intel_dp->last_power_cycle = jiffies;
3574 intel_dp->last_power_on = jiffies;
3575 intel_dp->last_backlight_off = jiffies;
3576 }
3577
/*
 * Determine the eDP panel power sequencer delays.
 *
 * Reads the current delays out of the PPS registers (PCH block, or the
 * per-pipe VLV block), merges them with the VBT values by taking the max
 * of each field, and falls back to the eDP spec limits where both are
 * unset.  The resulting per-phase delays are cached in @intel_dp and, if
 * @out is non-NULL, the raw final sequence is also returned for later
 * programming by intel_dp_init_panel_power_sequencer_registers().
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	/* PCH platforms have one fixed PPS register block; VLV has one per
	 * pipe, selected by the pipe the power sequencer is attached to. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* t11_t12 lives in the divisor register, in 100ms units; convert to
	 * the same 100us units the other fields use. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hw units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
3677
/*
 * Program the panel power sequencer hardware with the delays in @seq
 * (as computed by intel_dp_init_panel_power_sequencer()) and select the
 * port the sequencer should listen to.  Called only once the panel is
 * known to be real, so a ghost eDP never has its PPS programmed.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Reference clock for the PP divisor differs between PCH and non-PCH. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		/* VLV: per-pipe PPS block. */
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
		else
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
3743
/*
 * Finish connector setup for eDP panels.
 *
 * Verifies the panel really exists by reading its DPCD (under vdd),
 * programs the power sequencer registers with @power_seq, caches the
 * EDID on the connector, picks a fixed mode (EDID preferred, VBT as
 * fallback) and initializes panel/backlight state.
 *
 * Returns false when the "panel" is a ghost (DPCD read failed) so the
 * caller can tear the connector down again; true otherwise.  For
 * non-eDP ports this is a no-op that returns true.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector,
				     struct edp_power_seq *power_seq)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	if (!is_edp(intel_dp))
		return true;

	/* Cache DPCD and EDID for edp. */
	edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);

	/* Cache the EDID; an ERR_PTR marks "tried and failed" so later
	 * lookups don't re-read a broken panel over DDC. */
	edid = drm_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
	intel_panel_setup_backlight(connector);

	return true;
}
3815
/*
 * Create and wire up the drm connector for a DP/eDP digital port: select
 * the AUX vfuncs per platform, register the connector, set up the AUX
 * channel registers and DDC bus, initialize the eDP power sequencer, and
 * finish eDP panel setup.  Returns false (after full cleanup of the
 * connector) when an eDP panel turns out to be a ghost.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	struct edp_power_seq power_seq = { 0 };
	const char *name = NULL;
	int type, error;

	/* intel_dp vfuncs */
	if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Delayed worker that drops eDP vdd some time after the last user. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Default AUX channel control register sits at output_reg + 0x10;
	 * DDI platforms use dedicated per-port AUX registers instead. */
	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/* Compute the PPS delays before any AUX traffic needs vdd. */
	if (is_edp(intel_dp)) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	}

	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
	     error, port_name(port));

	intel_dp->psr_setup_done = false;

	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
		/* Ghost eDP panel: unwind everything set up above, including
		 * any vdd state left behind by the DPCD/EDID probes. */
		i2c_del_adapter(&intel_dp->adapter);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			mutex_lock(&dev->mode_config.mutex);
			edp_panel_vdd_off_sync(intel_dp);
			mutex_unlock(&dev->mode_config.mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
3960
3961 void
3962 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3963 {
3964 struct intel_digital_port *intel_dig_port;
3965 struct intel_encoder *intel_encoder;
3966 struct drm_encoder *encoder;
3967 struct intel_connector *intel_connector;
3968
3969 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3970 if (!intel_dig_port)
3971 return;
3972
3973 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
3974 if (!intel_connector) {
3975 kfree(intel_dig_port);
3976 return;
3977 }
3978
3979 intel_encoder = &intel_dig_port->base;
3980 encoder = &intel_encoder->base;
3981
3982 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3983 DRM_MODE_ENCODER_TMDS);
3984
3985 intel_encoder->compute_config = intel_dp_compute_config;
3986 intel_encoder->mode_set = intel_dp_mode_set;
3987 intel_encoder->disable = intel_disable_dp;
3988 intel_encoder->post_disable = intel_post_disable_dp;
3989 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3990 intel_encoder->get_config = intel_dp_get_config;
3991 if (IS_VALLEYVIEW(dev)) {
3992 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
3993 intel_encoder->pre_enable = vlv_pre_enable_dp;
3994 intel_encoder->enable = vlv_enable_dp;
3995 } else {
3996 intel_encoder->pre_enable = g4x_pre_enable_dp;
3997 intel_encoder->enable = g4x_enable_dp;
3998 }
3999
4000 intel_dig_port->port = port;
4001 intel_dig_port->dp.output_reg = output_reg;
4002
4003 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4004 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
4005 intel_encoder->cloneable = false;
4006 intel_encoder->hot_plug = intel_dp_hot_plug;
4007
4008 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4009 drm_encoder_cleanup(encoder);
4010 kfree(intel_dig_port);
4011 kfree(intel_connector);
4012 }
4013 }
This page took 0.501414 seconds and 5 git commands to generate.