drm: extract dp link bw helpers
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_crtc.h>
33 #include <drm/drm_crtc_helper.h>
34 #include <drm/drm_edid.h>
35 #include "intel_drv.h"
36 #include <drm/i915_drm.h>
37 #include "i915_drv.h"
38
39 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40
41 /**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct
44 *
45 * If a CPU or PCH DP output is attached to an eDP panel, this function
46 * will return true, and false otherwise.
47 */
static bool is_edp(struct intel_dp *intel_dp)
{
	/* eDP encoders carry the INTEL_OUTPUT_EDP type, set at init time. */
	return intel_dp->base.type == INTEL_OUTPUT_EDP;
}
52
53 /**
54 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
55 * @intel_dp: DP struct
56 *
57 * Returns true if the given DP struct corresponds to a PCH DP port attached
58 * to an eDP panel, false otherwise. Helpful for determining whether we
59 * may need FDI resources for a given DP output or not.
60 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	/* Cached flag, filled in when the port was initialized. */
	return intel_dp->is_pch_edp;
}
65
66 /**
67 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
68 * @intel_dp: DP struct
69 *
70 * Returns true if the given DP struct corresponds to a CPU eDP port.
71 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	/* CPU eDP == any eDP port that is not routed through the PCH. */
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
76
/* Map a drm_connector back to the intel_dp that embeds its encoder. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return container_of(intel_attached_encoder(connector),
			    struct intel_dp, base);
}
82
83 /**
84 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
85 * @encoder: DRM encoder
86 *
87 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
88 * by intel_display.c.
89 */
90 bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
91 {
92 struct intel_dp *intel_dp;
93
94 if (!encoder)
95 return false;
96
97 intel_dp = enc_to_intel_dp(encoder);
98
99 return is_pch_edp(intel_dp);
100 }
101
102 static void intel_dp_link_down(struct intel_dp *intel_dp);
103
/*
 * Report the currently-programmed eDP link parameters (lane count and
 * link rate as converted by drm_dp_bw_code_to_link_rate()) for use by
 * the mode-set code elsewhere in the driver.
 */
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
		      int *lane_num, int *link_bw)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	*lane_num = intel_dp->lane_count;
	*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}
113
114 int
115 intel_edp_target_clock(struct intel_encoder *intel_encoder,
116 struct drm_display_mode *mode)
117 {
118 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
119 struct intel_connector *intel_connector = intel_dp->attached_connector;
120
121 if (intel_connector->panel.fixed_mode)
122 return intel_connector->panel.fixed_mode->clock;
123 else
124 return mode->clock;
125 }
126
127 static int
128 intel_dp_max_lane_count(struct intel_dp *intel_dp)
129 {
130 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
131 switch (max_lane_count) {
132 case 1: case 2: case 4:
133 break;
134 default:
135 max_lane_count = 4;
136 }
137 return max_lane_count;
138 }
139
140 static int
141 intel_dp_max_link_bw(struct intel_dp *intel_dp)
142 {
143 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
144
145 switch (max_link_bw) {
146 case DP_LINK_BW_1_62:
147 case DP_LINK_BW_2_7:
148 break;
149 default:
150 max_link_bw = DP_LINK_BW_1_62;
151 break;
152 }
153 return max_link_bw;
154 }
155
156 static int
157 intel_dp_link_clock(uint8_t link_bw)
158 {
159 if (link_bw == DP_LINK_BW_2_7)
160 return 270000;
161 else
162 return 162000;
163 }
164
165 /*
166 * The units on the numbers in the next two are... bizarre. Examples will
167 * make it clearer; this one parallels an example in the eDP spec.
168 *
169 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
170 *
171 * 270000 * 1 * 8 / 10 == 216000
172 *
173 * The actual data capacity of that configuration is 2.16Gbit/s, so the
174 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
175 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
176 * 119000. At 18bpp that's 2142000 kilobits per second.
177 *
178 * Thus the strange-looking division by 10 in intel_dp_link_required, to
179 * get the result in decakilobits instead of kilobits.
180 */
181
/*
 * Bandwidth a mode needs, in decakilobits per second (see the unit
 * discussion above). Rounds up so a mode is never under-reported.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
187
/*
 * Maximum payload rate of a link, in decakilobits per second: 8b/10b
 * channel coding means only 8 of every 10 link bits carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw_rate = max_link_clock * max_lanes;

	return raw_rate * 8 / 10;
}
193
194 static bool
195 intel_dp_adjust_dithering(struct intel_dp *intel_dp,
196 struct drm_display_mode *mode,
197 bool adjust_mode)
198 {
199 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
200 int max_lanes = intel_dp_max_lane_count(intel_dp);
201 int max_rate, mode_rate;
202
203 mode_rate = intel_dp_link_required(mode->clock, 24);
204 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
205
206 if (mode_rate > max_rate) {
207 mode_rate = intel_dp_link_required(mode->clock, 18);
208 if (mode_rate > max_rate)
209 return false;
210
211 if (adjust_mode)
212 mode->private_flags
213 |= INTEL_MODE_DP_FORCE_6BPC;
214
215 return true;
216 }
217
218 return true;
219 }
220
221 static int
222 intel_dp_mode_valid(struct drm_connector *connector,
223 struct drm_display_mode *mode)
224 {
225 struct intel_dp *intel_dp = intel_attached_dp(connector);
226 struct intel_connector *intel_connector = to_intel_connector(connector);
227 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
228
229 if (is_edp(intel_dp) && fixed_mode) {
230 if (mode->hdisplay > fixed_mode->hdisplay)
231 return MODE_PANEL;
232
233 if (mode->vdisplay > fixed_mode->vdisplay)
234 return MODE_PANEL;
235 }
236
237 if (!intel_dp_adjust_dithering(intel_dp, mode, false))
238 return MODE_CLOCK_HIGH;
239
240 if (mode->clock < 10000)
241 return MODE_CLOCK_LOW;
242
243 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
244 return MODE_H_ILLEGAL;
245
246 return MODE_OK;
247 }
248
/* Pack up to four bytes, MSB first, into one 32-bit AUX data word. */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t word = 0;
	int idx;

	if (src_bytes > 4)
		src_bytes = 4;
	for (idx = 0; idx < src_bytes; idx++)
		word |= (uint32_t)src[idx] << (8 * (3 - idx));
	return word;
}
261
/* Unpack a 32-bit AUX data word, MSB first, into up to four bytes. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int idx;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (idx = 0; idx < dst_bytes; idx++)
		dst[idx] = src >> (8 * (3 - idx));
}
271
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	/* Decode the FSB frequency field into hrawclk, in MHz. */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: fall back to the most conservative value. */
		return 133;
	}
}
305
/* True when the PCH power sequencer reports the panel as powered on. */
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}
313
/* True when panel VDD is currently being forced on via PCH_PP_CONTROL. */
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}
321
/*
 * Sanity check before AUX traffic on eDP: warn if neither panel power
 * nor forced VDD is up, since AUX transactions would then fail.
 * No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}
337
/*
 * Perform one raw AUX channel transaction: send @send_bytes from @send,
 * then read back up to @recv_size bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY if the channel never went idle / never completed, -EIO on a
 * receive error, -ETIMEDOUT when the sink did not respond (typically
 * because nothing is connected).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Pre-Haswell: AUX control/data registers live at fixed offsets
	 * from the port's output register. */
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	/* Haswell moved the AUX registers; pick them per port instead. */
	if (IS_HASWELL(dev)) {
		switch (intel_dp->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	/* Precharge time, in units of 2us, differs on SNB. */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		/* Busy-poll until the hardware drops the SEND_BUSY bit. */
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		/* Retry the whole send on timeout or receive error. */
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}
486
487 /* Write data to the aux channel in native mode */
488 static int
489 intel_dp_aux_native_write(struct intel_dp *intel_dp,
490 uint16_t address, uint8_t *send, int send_bytes)
491 {
492 int ret;
493 uint8_t msg[20];
494 int msg_bytes;
495 uint8_t ack;
496
497 intel_dp_check_edp(intel_dp);
498 if (send_bytes > 16)
499 return -1;
500 msg[0] = AUX_NATIVE_WRITE << 4;
501 msg[1] = address >> 8;
502 msg[2] = address & 0xff;
503 msg[3] = send_bytes - 1;
504 memcpy(&msg[4], send, send_bytes);
505 msg_bytes = send_bytes + 4;
506 for (;;) {
507 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
508 if (ret < 0)
509 return ret;
510 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
511 break;
512 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
513 udelay(100);
514 else
515 return -EIO;
516 }
517 return send_bytes;
518 }
519
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	/* Convenience wrapper around the multi-byte native write. */
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}
527
528 /* read bytes from a native aux channel */
529 static int
530 intel_dp_aux_native_read(struct intel_dp *intel_dp,
531 uint16_t address, uint8_t *recv, int recv_bytes)
532 {
533 uint8_t msg[4];
534 int msg_bytes;
535 uint8_t reply[20];
536 int reply_bytes;
537 uint8_t ack;
538 int ret;
539
540 intel_dp_check_edp(intel_dp);
541 msg[0] = AUX_NATIVE_READ << 4;
542 msg[1] = address >> 8;
543 msg[2] = address & 0xff;
544 msg[3] = recv_bytes - 1;
545
546 msg_bytes = 4;
547 reply_bytes = recv_bytes + 1;
548
549 for (;;) {
550 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
551 reply, reply_bytes);
552 if (ret == 0)
553 return -EPROTO;
554 if (ret < 0)
555 return ret;
556 ack = reply[0];
557 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
558 memcpy(recv, reply + 1, ret - 1);
559 return ret - 1;
560 }
561 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
562 udelay(100);
563 else
564 return -EIO;
565 }
566 }
567
/*
 * i2c-over-AUX transfer callback used by the i2c_dp_aux algorithm.
 * Sends one I2C byte (or read request) wrapped in an AUX transaction,
 * retrying up to 5 times on DEFER replies. Returns the number of reply
 * payload bytes on success, a negative errno otherwise.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) keeps the I2C transfer open. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	/* Message layout depends on the phase of the I2C transaction. */
	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* Address-only (start/stop) transaction: no length byte. */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		/* First check the native (AUX-level) reply field ... */
		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		/* ... then the I2C-level reply field. */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}
664
665 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
666 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
667
/*
 * Register the i2c-over-AUX adapter for this DP port so DDC/EDID reads
 * go through the AUX channel. Panel VDD is forced on around bus
 * registration because it probes the channel.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	/* strncpy may not terminate; the next line guarantees it. */
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}
692
/*
 * Encoder mode_fixup hook: pick the cheapest link configuration (bw code
 * and lane count) that can carry @mode, applying the eDP fixed panel
 * mode and 6bpc dithering fallback first. On success the chosen config
 * is stored in @intel_dp and adjusted_mode->clock is set to the link
 * clock. Returns false when no configuration fits.
 */
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Index into bws[]: 1 if the sink supports the 2.7GHz rate. */
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	/* eDP: override the requested mode with the panel's fixed mode. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Walk configs from cheapest (low bw, 1 lane) up to the max. */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}
748
/* M/N ratio values programmed into the pipe data/link registers. */
struct intel_dp_m_n {
	uint32_t tu;		/* transfer unit size */
	uint32_t gmch_m;	/* data M (pixel payload) */
	uint32_t gmch_n;	/* data N (link capacity) */
	uint32_t link_m;	/* link M (pixel clock) */
	uint32_t link_n;	/* link N (link clock) */
};
756
/*
 * Halve both terms of the ratio until each fits in the hardware's
 * 24-bit M/N fields; the ratio is preserved to within rounding.
 */
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	for (; *num > 0xffffff || *den > 0xffffff; *num >>= 1, *den >>= 1)
		;
}
765
/*
 * Compute the data (GMCH) and link M/N ratios for the given pixel and
 * link clocks, reduced to fit the 24-bit hardware fields.
 */
static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	/* Data M/N: pixel bytes per clock vs. total link byte capacity. */
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	/* Link M/N: pixel clock against link clock. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
781
/*
 * Program the pipe's data and link M/N registers for the DP/eDP encoder
 * on @crtc. The register set written depends on the platform (Haswell,
 * PCH-split, Valleyview, or legacy GMCH).
 */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	/* Same values, different register banks per platform generation. */
	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}
839
/*
 * Fill in the DPCD link-configuration block that will be written to the
 * sink during link training: bandwidth code, lane count, 8b/10b coding,
 * and (when supported) enhanced framing.
 */
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}
854
/*
 * Encoder mode_set hook: build the DP port register value (intel_dp->DP)
 * for the trained link configuration and the platform's register layout.
 * The value is written to the hardware later, during enable.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU eDP: sync polarity and PLL frequency live in this reg. */
	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		/* IBX PCH / CPU layout. */
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		/* CPT PCH: most config lives in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
955
/*
 * PCH_PP_STATUS mask/value pairs for the panel power sequencer wait
 * helpers below: fully on, fully off, and power-cycle-delay complete.
 */
#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
964
/*
 * Poll PCH_PP_STATUS until (status & mask) == value, giving up with an
 * error message after 5 seconds.
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}
983
/* Wait until the power sequencer reports the panel fully on and idle. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
989
/* Wait until the power sequencer reports the panel fully off and idle. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
995
/* Wait until the mandatory panel power-cycle delay has elapsed. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1001
1002
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	/* Replace the write-protect key so subsequent writes take effect. */
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}
1015
/*
 * Force panel VDD on so AUX transactions work before full panel power-up.
 * Paired with ironlake_edp_panel_vdd_off(); no-op on non-eDP ports.
 */
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	/* Unbalanced on/off calls indicate a driver bug. */
	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Respect the panel's power-cycle delay before re-powering. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}
1054
/*
 * Actually drop the forced-VDD bit, but only if nobody wants VDD anymore
 * and it is still asserted in hardware.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}
1074
/*
 * Delayed-work handler that drops VDD some time after the last user; the
 * mode_config mutex serializes against concurrent modeset activity.
 */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp->base.base.dev;

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}
1085
/*
 * ironlake_edp_panel_vdd_off - release the VDD reference
 * @intel_dp: DP struct
 * @sync: if true, drop VDD immediately; otherwise schedule a delayed
 *	  worker so VDD stays up across a burst of AUX accesses.
 *
 * Counterpart of ironlake_edp_panel_vdd_on(); no-op for non-eDP.
 */
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}
1108
/*
 * ironlake_edp_panel_on - switch eDP panel power on
 *
 * Waits out the panel power-cycle time, asserts POWER_TARGET_ON and
 * waits for the sequencer to report the panel up.  On gen5 the
 * PANEL_POWER_RESET bit must be dropped around the power-on sequence
 * (ILK workaround) and restored afterwards.  No-op for non-eDP or if
 * panel power is already up.
 */
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}
1150
/*
 * ironlake_edp_panel_off - switch eDP panel power off
 *
 * Clears panel power, forced VDD, panel reset and the backlight enable
 * in a single register write (see inline comment for why VDD must go
 * down together with panel power).  The caller must hold a VDD
 * reference (WARNed otherwise), which is consumed here.
 */
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* VDD went down with the write above; drop the reference. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}
1175
/*
 * ironlake_edp_backlight_on - enable the eDP panel backlight
 *
 * Delays by the panel's backlight_on_delay first (see inline comment)
 * and then sets EDP_BLC_ENABLE.  No-op for non-eDP.
 */
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}
1198
/*
 * ironlake_edp_backlight_off - disable the eDP panel backlight
 *
 * Clears EDP_BLC_ENABLE and then waits out backlight_off_delay so
 * subsequent panel-power changes honour the panel's timing
 * requirements.  No-op for non-eDP.
 */
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}
1215
/*
 * ironlake_edp_pll_on - enable the CPU eDP PLL via DP_A
 *
 * May only be called with the pipe disabled (asserted) and the PLL and
 * port currently off (WARNed).  The 200us delay after the write gives
 * the PLL time to settle — presumably its lock time; confirm against
 * the hardware spec.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
1240
/*
 * ironlake_edp_pll_off - disable the CPU eDP PLL via DP_A
 *
 * May only be called with the pipe disabled (asserted), the PLL on and
 * the port off (WARNed).  Writes the live register value rather than
 * the cached one — see inline comment.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail). */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
1264
/*
 * intel_dp_sink_dpms - set the sink power state via DP_SET_POWER
 * @intel_dp: DP struct
 * @mode: DRM_MODE_DPMS_* value; anything but ON puts the sink in D3
 *
 * If the sink supports it, try to set the power state appropriately.
 * Sinks with DPCD revision below 1.1 have no DP_SET_POWER register, so
 * this silently does nothing for them.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}
1294
/*
 * intel_dp_get_hw_state - read back which pipe this DP port drives
 * @encoder: the DP encoder
 * @pipe: out-param, set to the driving pipe if one is found
 *
 * Returns false if the port is disabled.  For CPT PCH ports the pipe
 * mapping lives in the per-transcoder TRANS_DP_CTL port-select field,
 * so all transcoders are scanned; otherwise it is encoded in the port
 * register itself.  Returns true (with *pipe untouched) if the port is
 * enabled but no pipe mapping could be determined.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		/* Map our port register to the matching transcoder
		 * port-select value. */
		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}
	}

	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);

	return true;
}
1342
/*
 * Encoder disable hook: backlight off, panel off (holding VDD for the
 * register accesses), then link down for PCH ports.  Note the sink is
 * (re)set to D0 here — presumably so AUX/link-status reads during
 * teardown still work; confirm against the modeset sequence docs.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}
1358
/*
 * Encoder post-disable hook: CPU eDP must take the link down and turn
 * the eDP PLL off only after the pipe/plane have been disabled, so
 * that happens here rather than in intel_disable_dp().
 */
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
	}
}
1368
/*
 * Encoder enable hook.  The ordering is deliberate: VDD must be forced
 * on before any AUX traffic (sink DPMS, link training), the panel is
 * powered before VDD is released, and the backlight only comes up
 * after training completes.  Bails with a WARN if the port is somehow
 * already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}
1387
/*
 * Encoder pre-enable hook: the CPU eDP PLL must be running before the
 * pipe/port are enabled, so turn it on here.
 */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}
1395
1396 /*
1397 * Native read with retry for link status and receiver capability reads for
1398 * cases where the sink may still be asleep.
1399 */
1400 static bool
1401 intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1402 uint8_t *recv, int recv_bytes)
1403 {
1404 int ret, i;
1405
1406 /*
1407 * Sinks are *supposed* to come up within 1ms from an off state,
1408 * but we're also supposed to retry 3 times per the spec.
1409 */
1410 for (i = 0; i < 3; i++) {
1411 ret = intel_dp_aux_native_read(intel_dp, address, recv,
1412 recv_bytes);
1413 if (ret == recv_bytes)
1414 return true;
1415 msleep(1);
1416 }
1417
1418 return false;
1419 }
1420
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information.  Returns true only if the full
 * DP_LINK_STATUS_SIZE block was read successfully.
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}
1433
#if 0
/* Debug-only name tables for hand-instrumenting the link-training
 * code; compiled out in normal builds. */
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif
1445
1446 /*
1447 * These are source-specific values; current Intel hardware supports
1448 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1449 */
1450
1451 static uint8_t
1452 intel_dp_voltage_max(struct intel_dp *intel_dp)
1453 {
1454 struct drm_device *dev = intel_dp->base.base.dev;
1455
1456 if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1457 return DP_TRAIN_VOLTAGE_SWING_800;
1458 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1459 return DP_TRAIN_VOLTAGE_SWING_1200;
1460 else
1461 return DP_TRAIN_VOLTAGE_SWING_800;
1462 }
1463
1464 static uint8_t
1465 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1466 {
1467 struct drm_device *dev = intel_dp->base.base.dev;
1468
1469 if (IS_HASWELL(dev)) {
1470 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1471 case DP_TRAIN_VOLTAGE_SWING_400:
1472 return DP_TRAIN_PRE_EMPHASIS_9_5;
1473 case DP_TRAIN_VOLTAGE_SWING_600:
1474 return DP_TRAIN_PRE_EMPHASIS_6;
1475 case DP_TRAIN_VOLTAGE_SWING_800:
1476 return DP_TRAIN_PRE_EMPHASIS_3_5;
1477 case DP_TRAIN_VOLTAGE_SWING_1200:
1478 default:
1479 return DP_TRAIN_PRE_EMPHASIS_0;
1480 }
1481 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1482 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1483 case DP_TRAIN_VOLTAGE_SWING_400:
1484 return DP_TRAIN_PRE_EMPHASIS_6;
1485 case DP_TRAIN_VOLTAGE_SWING_600:
1486 case DP_TRAIN_VOLTAGE_SWING_800:
1487 return DP_TRAIN_PRE_EMPHASIS_3_5;
1488 default:
1489 return DP_TRAIN_PRE_EMPHASIS_0;
1490 }
1491 } else {
1492 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1493 case DP_TRAIN_VOLTAGE_SWING_400:
1494 return DP_TRAIN_PRE_EMPHASIS_6;
1495 case DP_TRAIN_VOLTAGE_SWING_600:
1496 return DP_TRAIN_PRE_EMPHASIS_6;
1497 case DP_TRAIN_VOLTAGE_SWING_800:
1498 return DP_TRAIN_PRE_EMPHASIS_3_5;
1499 case DP_TRAIN_VOLTAGE_SWING_1200:
1500 default:
1501 return DP_TRAIN_PRE_EMPHASIS_0;
1502 }
1503 }
1504 }
1505
1506 static void
1507 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1508 {
1509 uint8_t v = 0;
1510 uint8_t p = 0;
1511 int lane;
1512 uint8_t voltage_max;
1513 uint8_t preemph_max;
1514
1515 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1516 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1517 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
1518
1519 if (this_v > v)
1520 v = this_v;
1521 if (this_p > p)
1522 p = this_p;
1523 }
1524
1525 voltage_max = intel_dp_voltage_max(intel_dp);
1526 if (v >= voltage_max)
1527 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1528
1529 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1530 if (p >= preemph_max)
1531 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1532
1533 for (lane = 0; lane < 4; lane++)
1534 intel_dp->train_set[lane] = v | p;
1535 }
1536
1537 static uint32_t
1538 intel_dp_signal_levels(uint8_t train_set)
1539 {
1540 uint32_t signal_levels = 0;
1541
1542 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1543 case DP_TRAIN_VOLTAGE_SWING_400:
1544 default:
1545 signal_levels |= DP_VOLTAGE_0_4;
1546 break;
1547 case DP_TRAIN_VOLTAGE_SWING_600:
1548 signal_levels |= DP_VOLTAGE_0_6;
1549 break;
1550 case DP_TRAIN_VOLTAGE_SWING_800:
1551 signal_levels |= DP_VOLTAGE_0_8;
1552 break;
1553 case DP_TRAIN_VOLTAGE_SWING_1200:
1554 signal_levels |= DP_VOLTAGE_1_2;
1555 break;
1556 }
1557 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1558 case DP_TRAIN_PRE_EMPHASIS_0:
1559 default:
1560 signal_levels |= DP_PRE_EMPHASIS_0;
1561 break;
1562 case DP_TRAIN_PRE_EMPHASIS_3_5:
1563 signal_levels |= DP_PRE_EMPHASIS_3_5;
1564 break;
1565 case DP_TRAIN_PRE_EMPHASIS_6:
1566 signal_levels |= DP_PRE_EMPHASIS_6;
1567 break;
1568 case DP_TRAIN_PRE_EMPHASIS_9_5:
1569 signal_levels |= DP_PRE_EMPHASIS_9_5;
1570 break;
1571 }
1572 return signal_levels;
1573 }
1574
/* Gen6's DP voltage swing and pre-emphasis control.
 * Several swing/pre-emphasis combinations share a hardware encoding on
 * SNB, hence the grouped case labels; unsupported combinations fall
 * back to the lowest setting. */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
1602
/* Gen7's DP voltage swing and pre-emphasis control.
 * Unsupported combinations fall back to IVB's 500mV/0dB encoding. */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
1633
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control.
 * Maps train_set bits onto the DDI buffer-translation (DDI_BUF_EMP_*)
 * encodings; unsupported combinations fall back to 400mV/0dB. */
static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
1667
/*
 * intel_dp_set_link_train - program a training pattern on source and sink
 * @intel_dp: DP struct
 * @dp_reg_value: base value to write to the DP port register
 * @dp_train_pat: DP_TRAINING_PATTERN_* optionally ORed with
 *		  DP_LINK_SCRAMBLING_DISABLE
 *
 * Programs the pattern into the source — the DDI DP_TP_CTL register on
 * Haswell, the port register (CPT or original encodings) elsewhere —
 * then mirrors it to the sink via DP_TRAINING_PATTERN_SET and writes
 * the per-lane drive settings from intel_dp->train_set.  Returns false
 * if the per-lane sink write came back short.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	uint32_t temp;

	if (IS_HASWELL(dev)) {
		temp = I915_READ(DP_TP_CTL(intel_dp->port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			/* Disabling training means going to the normal
			 * (pixel) pattern, via an intermediate idle
			 * pattern that must complete first. */
			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
			I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

			if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) &
				      DP_TP_STATUS_IDLE_DONE), 1))
				DRM_ERROR("Timed out waiting for DP idle patterns\n");

			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		/* CPT uses its own training-pattern field encoding. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Tell the sink which pattern to expect. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}
1771
/* Enable corresponding port and start training pattern 1.
 *
 * Clock-recovery phase of DP link training: writes the link
 * configuration to the sink, enables the port, then loops adjusting
 * per-lane drive settings (pattern 1, scrambling off) until all lanes
 * report clock recovery.  Gives up after 5 tries at the same voltage,
 * with up to 5 full restarts from zeroed drive settings. */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &intel_dp->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (IS_HASWELL(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];
		uint32_t signal_levels;

		/* Translate the drive settings into the platform's
		 * register encoding. */
		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(
							intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}
		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
			      signal_levels);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count && voltage_tries == 5) {
			if (++loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			/* Restart from scratch with zeroed drive settings. */
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
			voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
			voltage_tries = 0;
		} else
			++voltage_tries;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}
1866
/*
 * Channel-equalization phase of link training (training pattern 2).
 * Restarts clock recovery if CR is lost mid-EQ or if EQ doesn't
 * converge within 5 tries; gives up entirely after 5 CR restarts.
 * Always finishes by switching the training pattern off.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t signal_levels;
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		/* Translate drive settings for this platform (same
		 * dispatch as in intel_dp_start_link_train()). */
		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");

	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
1945
/*
 * intel_dp_link_down - take the DP link down and disable the port
 *
 * Puts the port into the idle training pattern, applies the IBX
 * transcoder-B select workaround, then disables the port and audio
 * output.  Note intel_dp->DP is deliberately left untouched so a later
 * retrain can restore the same configuration.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (IS_HASWELL(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	msleep(17);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2025
2026 static bool
2027 intel_dp_get_dpcd(struct intel_dp *intel_dp)
2028 {
2029 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2030 sizeof(intel_dp->dpcd)) == 0)
2031 return false; /* aux transfer failed */
2032
2033 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2034 return false; /* DPCD not present */
2035
2036 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2037 DP_DWN_STRM_PORT_PRESENT))
2038 return true; /* native DP sink */
2039
2040 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2041 return true; /* no per-port downstream info */
2042
2043 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2044 intel_dp->downstream_ports,
2045 DP_MAX_DOWNSTREAM_PORTS) == 0)
2046 return false; /* downstream port status fetch failed */
2047
2048 return true;
2049 }
2050
/*
 * Read and log the sink and branch OUIs for debugging, if the sink
 * advertises OUI support.  Holds a VDD reference around the AUX reads;
 * the asynchronous vdd_off keeps VDD up for any follow-on AUX traffic.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}
2071
2072 static bool
2073 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2074 {
2075 int ret;
2076
2077 ret = intel_dp_aux_native_read_retry(intel_dp,
2078 DP_DEVICE_SERVICE_IRQ_VECTOR,
2079 sink_irq_vector, 1);
2080 if (!ret)
2081 return false;
2082
2083 return true;
2084 }
2085
2086 static void
2087 intel_dp_handle_test_request(struct intel_dp *intel_dp)
2088 {
2089 /* NAK by default */
2090 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
2091 }
2092
2093 /*
2094 * According to DP spec
2095 * 5.1.2:
2096 * 1. Read DPCD
2097 * 2. Configure link according to Receiver Capabilities
2098 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2099 * 4. Check link status on receipt of hot-plug interrupt
2100 */
2101
/*
 * Re-check link health after a hot-plug interrupt (step 4 of the DP
 * spec 5.1.2 sequence quoted above): read the link status and DPCD,
 * service any pending sink IRQs, and retrain the link if channel
 * equalization has been lost.  Only acts while the output is active.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to do unless this output is currently driving a display. */
	if (!intel_dp->base.connectors_active)
		return;

	if (WARN_ON(!intel_dp->base.base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain when the lanes have dropped out of equalization. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}
2147
2148 /* XXX this is probably wrong for multiple downstream ports */
2149 static enum drm_connector_status
2150 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2151 {
2152 uint8_t *dpcd = intel_dp->dpcd;
2153 bool hpd;
2154 uint8_t type;
2155
2156 if (!intel_dp_get_dpcd(intel_dp))
2157 return connector_status_disconnected;
2158
2159 /* if there's no downstream port, we're done */
2160 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2161 return connector_status_connected;
2162
2163 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2164 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2165 if (hpd) {
2166 uint8_t reg;
2167 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2168 &reg, 1))
2169 return connector_status_unknown;
2170 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2171 : connector_status_disconnected;
2172 }
2173
2174 /* If no HPD, poke DDC gently */
2175 if (drm_probe_ddc(&intel_dp->adapter))
2176 return connector_status_connected;
2177
2178 /* Well we tried, say unknown for unreliable port types */
2179 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2180 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2181 return connector_status_unknown;
2182
2183 /* Anything else is out of spec, warn and ignore */
2184 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2185 return connector_status_disconnected;
2186 }
2187
2188 static enum drm_connector_status
2189 ironlake_dp_detect(struct intel_dp *intel_dp)
2190 {
2191 enum drm_connector_status status;
2192
2193 /* Can't disconnect eDP, but you can close the lid... */
2194 if (is_edp(intel_dp)) {
2195 status = intel_panel_detect(intel_dp->base.base.dev);
2196 if (status == connector_status_unknown)
2197 status = connector_status_connected;
2198 return status;
2199 }
2200
2201 return intel_dp_detect_dpcd(intel_dp);
2202 }
2203
2204 static enum drm_connector_status
2205 g4x_dp_detect(struct intel_dp *intel_dp)
2206 {
2207 struct drm_device *dev = intel_dp->base.base.dev;
2208 struct drm_i915_private *dev_priv = dev->dev_private;
2209 uint32_t bit;
2210
2211 switch (intel_dp->output_reg) {
2212 case DP_B:
2213 bit = DPB_HOTPLUG_LIVE_STATUS;
2214 break;
2215 case DP_C:
2216 bit = DPC_HOTPLUG_LIVE_STATUS;
2217 break;
2218 case DP_D:
2219 bit = DPD_HOTPLUG_LIVE_STATUS;
2220 break;
2221 default:
2222 return connector_status_unknown;
2223 }
2224
2225 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2226 return connector_status_disconnected;
2227
2228 return intel_dp_detect_dpcd(intel_dp);
2229 }
2230
2231 static struct edid *
2232 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2233 {
2234 struct intel_connector *intel_connector = to_intel_connector(connector);
2235
2236 /* use cached edid if we have one */
2237 if (intel_connector->edid) {
2238 struct edid *edid;
2239 int size;
2240
2241 /* invalid edid */
2242 if (IS_ERR(intel_connector->edid))
2243 return NULL;
2244
2245 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2246 edid = kmalloc(size, GFP_KERNEL);
2247 if (!edid)
2248 return NULL;
2249
2250 memcpy(edid, intel_connector->edid, size);
2251 return edid;
2252 }
2253
2254 return drm_get_edid(connector, adapter);
2255 }
2256
2257 static int
2258 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2259 {
2260 struct intel_connector *intel_connector = to_intel_connector(connector);
2261
2262 /* use cached edid if we have one */
2263 if (intel_connector->edid) {
2264 /* invalid edid */
2265 if (IS_ERR(intel_connector->edid))
2266 return 0;
2267
2268 return intel_connector_update_modes(connector,
2269 intel_connector->edid);
2270 }
2271
2272 return intel_ddc_get_modes(connector, adapter);
2273 }
2274
2275
2276 /**
2277 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
2278 *
2279 * \return true if DP port is connected.
2280 * \return false if DP port is disconnected.
2281 */
2282 static enum drm_connector_status
2283 intel_dp_detect(struct drm_connector *connector, bool force)
2284 {
2285 struct intel_dp *intel_dp = intel_attached_dp(connector);
2286 struct drm_device *dev = intel_dp->base.base.dev;
2287 enum drm_connector_status status;
2288 struct edid *edid = NULL;
2289
2290 intel_dp->has_audio = false;
2291
2292 if (HAS_PCH_SPLIT(dev))
2293 status = ironlake_dp_detect(intel_dp);
2294 else
2295 status = g4x_dp_detect(intel_dp);
2296
2297 DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
2298 intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
2299 intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
2300 intel_dp->dpcd[6], intel_dp->dpcd[7]);
2301
2302 if (status != connector_status_connected)
2303 return status;
2304
2305 intel_dp_probe_oui(intel_dp);
2306
2307 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2308 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
2309 } else {
2310 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2311 if (edid) {
2312 intel_dp->has_audio = drm_detect_monitor_audio(edid);
2313 kfree(edid);
2314 }
2315 }
2316
2317 return connector_status_connected;
2318 }
2319
2320 static int intel_dp_get_modes(struct drm_connector *connector)
2321 {
2322 struct intel_dp *intel_dp = intel_attached_dp(connector);
2323 struct intel_connector *intel_connector = to_intel_connector(connector);
2324 struct drm_device *dev = intel_dp->base.base.dev;
2325 int ret;
2326
2327 /* We should parse the EDID data and find out if it has an audio sink
2328 */
2329
2330 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
2331 if (ret)
2332 return ret;
2333
2334 /* if eDP has no EDID, fall back to fixed mode */
2335 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2336 struct drm_display_mode *mode;
2337 mode = drm_mode_duplicate(dev,
2338 intel_connector->panel.fixed_mode);
2339 if (mode) {
2340 drm_mode_probed_add(connector, mode);
2341 return 1;
2342 }
2343 }
2344 return 0;
2345 }
2346
2347 static bool
2348 intel_dp_detect_audio(struct drm_connector *connector)
2349 {
2350 struct intel_dp *intel_dp = intel_attached_dp(connector);
2351 struct edid *edid;
2352 bool has_audio = false;
2353
2354 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2355 if (edid) {
2356 has_audio = drm_detect_monitor_audio(edid);
2357 kfree(edid);
2358 }
2359
2360 return has_audio;
2361 }
2362
2363 static int
2364 intel_dp_set_property(struct drm_connector *connector,
2365 struct drm_property *property,
2366 uint64_t val)
2367 {
2368 struct drm_i915_private *dev_priv = connector->dev->dev_private;
2369 struct intel_dp *intel_dp = intel_attached_dp(connector);
2370 int ret;
2371
2372 ret = drm_connector_property_set_value(connector, property, val);
2373 if (ret)
2374 return ret;
2375
2376 if (property == dev_priv->force_audio_property) {
2377 int i = val;
2378 bool has_audio;
2379
2380 if (i == intel_dp->force_audio)
2381 return 0;
2382
2383 intel_dp->force_audio = i;
2384
2385 if (i == HDMI_AUDIO_AUTO)
2386 has_audio = intel_dp_detect_audio(connector);
2387 else
2388 has_audio = (i == HDMI_AUDIO_ON);
2389
2390 if (has_audio == intel_dp->has_audio)
2391 return 0;
2392
2393 intel_dp->has_audio = has_audio;
2394 goto done;
2395 }
2396
2397 if (property == dev_priv->broadcast_rgb_property) {
2398 if (val == !!intel_dp->color_range)
2399 return 0;
2400
2401 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
2402 goto done;
2403 }
2404
2405 return -EINVAL;
2406
2407 done:
2408 if (intel_dp->base.base.crtc) {
2409 struct drm_crtc *crtc = intel_dp->base.base.crtc;
2410 intel_set_mode(crtc, &crtc->mode,
2411 crtc->x, crtc->y, crtc->fb);
2412 }
2413
2414 return 0;
2415 }
2416
2417 static void
2418 intel_dp_destroy(struct drm_connector *connector)
2419 {
2420 struct drm_device *dev = connector->dev;
2421 struct intel_dp *intel_dp = intel_attached_dp(connector);
2422 struct intel_connector *intel_connector = to_intel_connector(connector);
2423
2424 if (!IS_ERR_OR_NULL(intel_connector->edid))
2425 kfree(intel_connector->edid);
2426
2427 if (is_edp(intel_dp)) {
2428 intel_panel_destroy_backlight(dev);
2429 intel_panel_fini(&intel_connector->panel);
2430 }
2431
2432 drm_sysfs_connector_remove(connector);
2433 drm_connector_cleanup(connector);
2434 kfree(connector);
2435 }
2436
2437 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2438 {
2439 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2440
2441 i2c_del_adapter(&intel_dp->adapter);
2442 drm_encoder_cleanup(encoder);
2443 if (is_edp(intel_dp)) {
2444 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2445 ironlake_panel_vdd_off_sync(intel_dp);
2446 }
2447 kfree(intel_dp);
2448 }
2449
/* Encoder helper vtable for pre-DDI platforms. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
	.disable = intel_encoder_noop,
};

/* Haswell variant: mode_set is routed through the DDI code instead. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs_hsw = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_ddi_mode_set,
	.disable = intel_encoder_noop,
};

/* Connector ops shared by DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

/* Probing helpers: mode enumeration, validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* Encoder teardown hook. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
2479
2480 static void
2481 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2482 {
2483 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
2484
2485 intel_dp_check_link_status(intel_dp);
2486 }
2487
2488 /* Return which DP Port should be selected for Transcoder DP control */
2489 int
2490 intel_trans_dp_port_sel(struct drm_crtc *crtc)
2491 {
2492 struct drm_device *dev = crtc->dev;
2493 struct intel_encoder *encoder;
2494
2495 for_each_encoder_on_crtc(dev, crtc, encoder) {
2496 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2497
2498 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2499 intel_dp->base.type == INTEL_OUTPUT_EDP)
2500 return intel_dp->output_reg;
2501 }
2502
2503 return -1;
2504 }
2505
2506 /* check the VBT to see whether the eDP is on DP-D port */
2507 bool intel_dpd_is_edp(struct drm_device *dev)
2508 {
2509 struct drm_i915_private *dev_priv = dev->dev_private;
2510 struct child_device_config *p_child;
2511 int i;
2512
2513 if (!dev_priv->child_dev_num)
2514 return false;
2515
2516 for (i = 0; i < dev_priv->child_dev_num; i++) {
2517 p_child = dev_priv->child_dev + i;
2518
2519 if (p_child->dvo_port == PORT_IDPD &&
2520 p_child->device_type == DEVICE_TYPE_eDP)
2521 return true;
2522 }
2523 return false;
2524 }
2525
/* Attach the user-tweakable connector properties (force-audio and
 * broadcast RGB range) to a freshly created DP connector. */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
}
2532
/*
 * intel_dp_init - register a DP (or eDP) encoder/connector pair
 * @dev: drm device
 * @output_reg: port control register (DP_A..DP_D / PCH_DP_*)
 * @port: port enum used for DDC bus naming and hotplug mask bits
 *
 * Allocates the intel_dp encoder and its connector, classifies the port
 * as DP vs eDP, wires up mode-set/enable hooks (DDI variants on
 * Haswell), sets up the AUX/DDC bus, and for eDP reads the panel power
 * sequencing delays, DPCD and panel EDID/fixed mode.  On fatal errors
 * (allocation failure, ghost eDP) everything is torn down again.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_dp *intel_dp;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	struct drm_display_mode *fixed_mode = NULL;
	const char *name = NULL;
	int type;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
	if (!intel_dp)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->port = port;
	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}
	intel_encoder = &intel_dp->base;
	intel_dp->attached_connector = intel_connector;

	/* PCH DP-D may be strapped as eDP; the VBT knows. */
	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && output_reg == DP_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (output_reg == DP_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	}

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_encoder->cloneable = false;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	/* Haswell drives DP through the DDI blocks, older gens directly. */
	if (IS_HASWELL(dev))
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs_hsw);
	else
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (IS_HASWELL(dev)) {
		intel_encoder->enable = intel_enable_ddi;
		intel_encoder->pre_enable = intel_ddi_pre_enable;
		intel_encoder->disable = intel_disable_ddi;
		intel_encoder->post_disable = intel_ddi_post_disable;
		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
	} else {
		intel_encoder->enable = intel_enable_dp;
		intel_encoder->pre_enable = intel_pre_enable_dp;
		intel_encoder->disable = intel_disable_dp;
		intel_encoder->post_disable = intel_post_disable_dp;
		intel_encoder->get_hw_state = intel_dp_get_hw_state;
	}
	intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	/* Cache some DPCD data in the eDP case */
	if (is_edp(intel_dp)) {
		struct edp_power_seq cur, vbt;
		u32 pp_on, pp_off, pp_div;

		/* Read the hw's idea of the panel power sequence... */
		pp_on = I915_READ(PCH_PP_ON_DELAYS);
		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
		pp_div = I915_READ(PCH_PP_DIVISOR);

		if (!pp_on || !pp_off || !pp_div) {
			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Pull timing values out of registers */
		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
			PANEL_POWER_UP_DELAY_SHIFT;

		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
			PANEL_LIGHT_ON_DELAY_SHIFT;

		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
			PANEL_LIGHT_OFF_DELAY_SHIFT;

		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
			PANEL_POWER_DOWN_DELAY_SHIFT;

		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

		/* ...and the VBT's, then take the stricter of each. */
		vbt = dev_priv->edp.pps;

		DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

/* Registers hold units of 100us; driver delays are in ms, rounded up. */
#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)

		intel_dp->panel_power_up_delay = get_delay(t1_t3);
		intel_dp->backlight_on_delay = get_delay(t8);
		intel_dp->backlight_off_delay = get_delay(t9);
		intel_dp->panel_power_down_delay = get_delay(t10);
		intel_dp->panel_power_cycle_delay = get_delay(t11_t12);

		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
			      intel_dp->panel_power_cycle_delay);

		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
	}

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		/* DPCD reads on eDP need panel VDD up. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Cache the panel EDID once; probing uses this copy later. */
		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
This page took 0.08847 seconds and 5 git commands to generate.