drm: Pass 'name' to drm_encoder_init()
drivers/gpu/drm/i915/intel_dp.c
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
50 struct dp_link_dpll {
51 int clock;
52 struct dpll dpll;
53 };
54
55 static const struct dp_link_dpll gen4_dpll[] = {
56 { 162000,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { 270000,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60 };
61
62 static const struct dp_link_dpll pch_dpll[] = {
63 { 162000,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { 270000,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67 };
68
69 static const struct dp_link_dpll vlv_dpll[] = {
70 { 162000,
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
72 { 270000,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74 };
75
76 /*
77 * CHV supports eDP 1.4, which allows more link rates.
78 * Below we provide only the fixed rates and exclude the variable rates.
79 */
80 static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires the fractional divider to be programmed for m2.
83 * m2 is stored in fixed-point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92 };
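/*
 * Illustrative decode of the fixed-point m2 values above (not used by the
 * driver itself): with the (m2_int << 22) | m2_fraction encoding,
 *
 *   0x819999a == (32 << 22) | 1677722, i.e. m2 ~= 32 + 1677722/2^22 ~= 32.4
 *   0x6c00000 == (27 << 22) | 0,       i.e. m2 == 27 exactly
 *
 * A hypothetical helper to pull the two fields back out would be:
 *
 *   static inline u32 chv_m2_int(u32 m2)  { return m2 >> 22; }
 *   static inline u32 chv_m2_frac(u32 m2) { return m2 & ((1 << 22) - 1); }
 */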
93
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98 static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * Returns true if a CPU or PCH DP output is attached to an eDP panel,
105 * and false otherwise.
106 */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
119 }
120
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
132
133 static unsigned int intel_dp_unused_lane_mask(int lane_count)
134 {
135 return ~((1 << lane_count) - 1) & 0xf;
136 }
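/*
 * For example (illustrative): with lane_count == 2,
 * intel_dp_unused_lane_mask() returns ~0x3 & 0xf == 0xc,
 * i.e. lanes 2 and 3 are the unused ones.
 */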
137
138 static int
139 intel_dp_max_link_bw(struct intel_dp *intel_dp)
140 {
141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
146 case DP_LINK_BW_5_4:
147 break;
148 default:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155 }
156
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158 {
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160 struct drm_device *dev = intel_dig_port->base.base.dev;
161 u8 source_max, sink_max;
162
163 source_max = 4;
164 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
165 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
166 source_max = 2;
167
168 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
169
170 return min(source_max, sink_max);
171 }
172
173 /*
174 * The units on the numbers in the next two are... bizarre. Examples will
175 * make it clearer; this one parallels an example in the eDP spec.
176 *
177 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
178 *
179 * 270000 * 1 * 8 / 10 == 216000
180 *
181 * The actual data capacity of that configuration is 2.16Gbit/s, so the
182 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
183 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184 * 119000. At 18bpp that's 2142000 kilobits per second.
185 *
186 * Thus the strange-looking division by 10 in intel_dp_link_required, to
187 * get the result in decakilobits instead of kilobits.
188 */
189
190 static int
191 intel_dp_link_required(int pixel_clock, int bpp)
192 {
193 return (pixel_clock * bpp + 9) / 10;
194 }
195
196 static int
197 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
198 {
199 return (max_link_clock * max_lanes * 8) / 10;
200 }
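/*
 * Worked example of the units described above (illustrative), for
 * 1680x1050R (crtc_clock == 119000 kHz) at 18 bpp on one 2.7GHz lane:
 *
 *   intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
 *   intel_dp_max_data_rate(270000, 1)  == (270000 * 1 * 8) / 10  == 216000
 *
 * 214200 <= 216000, so the mode just fits; both values are in the
 * decakilobit units explained above.
 */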
201
202 static enum drm_mode_status
203 intel_dp_mode_valid(struct drm_connector *connector,
204 struct drm_display_mode *mode)
205 {
206 struct intel_dp *intel_dp = intel_attached_dp(connector);
207 struct intel_connector *intel_connector = to_intel_connector(connector);
208 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
209 int target_clock = mode->clock;
210 int max_rate, mode_rate, max_lanes, max_link_clock;
211
212 if (is_edp(intel_dp) && fixed_mode) {
213 if (mode->hdisplay > fixed_mode->hdisplay)
214 return MODE_PANEL;
215
216 if (mode->vdisplay > fixed_mode->vdisplay)
217 return MODE_PANEL;
218
219 target_clock = fixed_mode->clock;
220 }
221
222 max_link_clock = intel_dp_max_link_rate(intel_dp);
223 max_lanes = intel_dp_max_lane_count(intel_dp);
224
225 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
226 mode_rate = intel_dp_link_required(target_clock, 18);
227
228 if (mode_rate > max_rate)
229 return MODE_CLOCK_HIGH;
230
231 if (mode->clock < 10000)
232 return MODE_CLOCK_LOW;
233
234 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
235 return MODE_H_ILLEGAL;
236
237 return MODE_OK;
238 }
239
240 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
241 {
242 int i;
243 uint32_t v = 0;
244
245 if (src_bytes > 4)
246 src_bytes = 4;
247 for (i = 0; i < src_bytes; i++)
248 v |= ((uint32_t) src[i]) << ((3-i) * 8);
249 return v;
250 }
251
252 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
253 {
254 int i;
255 if (dst_bytes > 4)
256 dst_bytes = 4;
257 for (i = 0; i < dst_bytes; i++)
258 dst[i] = src >> ((3-i) * 8);
259 }
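/*
 * Pack/unpack round-trip example (illustrative): the AUX data registers
 * are big-endian with respect to the message bytes, so
 *
 *   uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *   intel_dp_pack_aux(buf, 4) == 0x12345678;
 *
 * and intel_dp_unpack_aux(0x12345678, buf, 4) restores the original
 * byte order.
 */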
260
261 static void
262 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
263 struct intel_dp *intel_dp);
264 static void
265 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
266 struct intel_dp *intel_dp);
267
268 static void pps_lock(struct intel_dp *intel_dp)
269 {
270 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
271 struct intel_encoder *encoder = &intel_dig_port->base;
272 struct drm_device *dev = encoder->base.dev;
273 struct drm_i915_private *dev_priv = dev->dev_private;
274 enum intel_display_power_domain power_domain;
275
276 /*
277 * See vlv_power_sequencer_reset() for why we need
278 * a power domain reference here.
279 */
280 power_domain = intel_display_port_aux_power_domain(encoder);
281 intel_display_power_get(dev_priv, power_domain);
282
283 mutex_lock(&dev_priv->pps_mutex);
284 }
285
286 static void pps_unlock(struct intel_dp *intel_dp)
287 {
288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
289 struct intel_encoder *encoder = &intel_dig_port->base;
290 struct drm_device *dev = encoder->base.dev;
291 struct drm_i915_private *dev_priv = dev->dev_private;
292 enum intel_display_power_domain power_domain;
293
294 mutex_unlock(&dev_priv->pps_mutex);
295
296 power_domain = intel_display_port_aux_power_domain(encoder);
297 intel_display_power_put(dev_priv, power_domain);
298 }
299
300 static void
301 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
302 {
303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
304 struct drm_device *dev = intel_dig_port->base.base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum pipe pipe = intel_dp->pps_pipe;
307 bool pll_enabled, release_cl_override = false;
308 enum dpio_phy phy = DPIO_PHY(pipe);
309 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
310 uint32_t DP;
311
312 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314 pipe_name(pipe), port_name(intel_dig_port->port)))
315 return;
316
317 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318 pipe_name(pipe), port_name(intel_dig_port->port));
319
320 /* Preserve the BIOS-computed detected bit. This is
321 * supposed to be read-only.
322 */
323 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
324 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
325 DP |= DP_PORT_WIDTH(1);
326 DP |= DP_LINK_TRAIN_PAT_1;
327
328 if (IS_CHERRYVIEW(dev))
329 DP |= DP_PIPE_SELECT_CHV(pipe);
330 else if (pipe == PIPE_B)
331 DP |= DP_PIPEB_SELECT;
332
333 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
334
335 /*
336 * The DPLL for the pipe must be enabled for this to work.
337 * So temporarily enable it if it's not already enabled.
338 */
339 if (!pll_enabled) {
340 release_cl_override = IS_CHERRYVIEW(dev) &&
341 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
342
343 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
344 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
345 }
346
347 /*
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power sequencer lock onto the port.
351 * Otherwise even the VDD force bit won't work.
352 */
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
355
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
358
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
361
362 if (!pll_enabled) {
363 vlv_force_pll_off(dev, pipe);
364
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
367 }
368 }
369
370 static enum pipe
371 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
372 {
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
378 enum pipe pipe;
379
380 lockdep_assert_held(&dev_priv->pps_mutex);
381
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
384
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
387
388 /*
389 * We don't have a power sequencer currently.
390 * Pick one that's not used by other ports.
391 */
392 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
393 base.head) {
394 struct intel_dp *tmp;
395
396 if (encoder->type != INTEL_OUTPUT_EDP)
397 continue;
398
399 tmp = enc_to_intel_dp(&encoder->base);
400
401 if (tmp->pps_pipe != INVALID_PIPE)
402 pipes &= ~(1 << tmp->pps_pipe);
403 }
404
405 /*
406 * Didn't find one. This should not happen since there
407 * are two power sequencers and up to two eDP ports.
408 */
409 if (WARN_ON(pipes == 0))
410 pipe = PIPE_A;
411 else
412 pipe = ffs(pipes) - 1;
413
414 vlv_steal_power_sequencer(dev, pipe);
415 intel_dp->pps_pipe = pipe;
416
417 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
418 pipe_name(intel_dp->pps_pipe),
419 port_name(intel_dig_port->port));
420
421 /* init power sequencer on this pipe and port */
422 intel_dp_init_panel_power_sequencer(dev, intel_dp);
423 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
424
425 /*
426 * Even VDD force doesn't work until we've made
427 * the power sequencer lock onto the port.
428 */
429 vlv_power_sequencer_kick(intel_dp);
430
431 return intel_dp->pps_pipe;
432 }
433
434 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
435 enum pipe pipe);
436
437 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
438 enum pipe pipe)
439 {
440 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
441 }
442
443 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
444 enum pipe pipe)
445 {
446 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
447 }
448
449 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
450 enum pipe pipe)
451 {
452 return true;
453 }
454
455 static enum pipe
456 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
457 enum port port,
458 vlv_pipe_check pipe_check)
459 {
460 enum pipe pipe;
461
462 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
463 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
464 PANEL_PORT_SELECT_MASK;
465
466 if (port_sel != PANEL_PORT_SELECT_VLV(port))
467 continue;
468
469 if (!pipe_check(dev_priv, pipe))
470 continue;
471
472 return pipe;
473 }
474
475 return INVALID_PIPE;
476 }
477
478 static void
479 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
480 {
481 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
482 struct drm_device *dev = intel_dig_port->base.base.dev;
483 struct drm_i915_private *dev_priv = dev->dev_private;
484 enum port port = intel_dig_port->port;
485
486 lockdep_assert_held(&dev_priv->pps_mutex);
487
488 /* try to find a pipe with this port selected */
489 /* first pick one where the panel is on */
490 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
491 vlv_pipe_has_pp_on);
492 /* didn't find one? pick one where vdd is on */
493 if (intel_dp->pps_pipe == INVALID_PIPE)
494 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
495 vlv_pipe_has_vdd_on);
496 /* didn't find one? pick one with just the correct port */
497 if (intel_dp->pps_pipe == INVALID_PIPE)
498 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
499 vlv_pipe_any);
500
501 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
502 if (intel_dp->pps_pipe == INVALID_PIPE) {
503 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
504 port_name(port));
505 return;
506 }
507
508 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
509 port_name(port), pipe_name(intel_dp->pps_pipe));
510
511 intel_dp_init_panel_power_sequencer(dev, intel_dp);
512 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
513 }
514
515 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
516 {
517 struct drm_device *dev = dev_priv->dev;
518 struct intel_encoder *encoder;
519
520 if (WARN_ON(!IS_VALLEYVIEW(dev)))
521 return;
522
523 /*
524 * We can't grab pps_mutex here due to deadlock with power_domain
525 * mutex when power_domain functions are called while holding pps_mutex.
526 * That also means that in order to use pps_pipe the code needs to
527 * hold both a power domain reference and pps_mutex, and the power domain
528 * reference get/put must be done while _not_ holding pps_mutex.
529 * pps_{lock,unlock}() do these steps in the correct order, so they
530 * should always be used.
531 */
532
533 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
534 struct intel_dp *intel_dp;
535
536 if (encoder->type != INTEL_OUTPUT_EDP)
537 continue;
538
539 intel_dp = enc_to_intel_dp(&encoder->base);
540 intel_dp->pps_pipe = INVALID_PIPE;
541 }
542 }
543
544 static i915_reg_t
545 _pp_ctrl_reg(struct intel_dp *intel_dp)
546 {
547 struct drm_device *dev = intel_dp_to_dev(intel_dp);
548
549 if (IS_BROXTON(dev))
550 return BXT_PP_CONTROL(0);
551 else if (HAS_PCH_SPLIT(dev))
552 return PCH_PP_CONTROL;
553 else
554 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
555 }
556
557 static i915_reg_t
558 _pp_stat_reg(struct intel_dp *intel_dp)
559 {
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562 if (IS_BROXTON(dev))
563 return BXT_PP_STATUS(0);
564 else if (HAS_PCH_SPLIT(dev))
565 return PCH_PP_STATUS;
566 else
567 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
568 }
569
570 /* Reboot notifier handler to shut down panel power and guarantee T12 timing.
571 This function is only applicable when the panel PM state is not being tracked. */
572 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
573 void *unused)
574 {
575 struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
576 edp_notifier);
577 struct drm_device *dev = intel_dp_to_dev(intel_dp);
578 struct drm_i915_private *dev_priv = dev->dev_private;
579
580 if (!is_edp(intel_dp) || code != SYS_RESTART)
581 return 0;
582
583 pps_lock(intel_dp);
584
585 if (IS_VALLEYVIEW(dev)) {
586 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
587 i915_reg_t pp_ctrl_reg, pp_div_reg;
588 u32 pp_div;
589
590 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
591 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
592 pp_div = I915_READ(pp_div_reg);
593 pp_div &= PP_REFERENCE_DIVIDER_MASK;
594
595 /* Writing 0x1F to PP_DIV_REG sets the max cycle delay */
596 I915_WRITE(pp_div_reg, pp_div | 0x1F);
597 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
598 msleep(intel_dp->panel_power_cycle_delay);
599 }
600
601 pps_unlock(intel_dp);
602
603 return 0;
604 }
605
606 static bool edp_have_panel_power(struct intel_dp *intel_dp)
607 {
608 struct drm_device *dev = intel_dp_to_dev(intel_dp);
609 struct drm_i915_private *dev_priv = dev->dev_private;
610
611 lockdep_assert_held(&dev_priv->pps_mutex);
612
613 if (IS_VALLEYVIEW(dev) &&
614 intel_dp->pps_pipe == INVALID_PIPE)
615 return false;
616
617 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
618 }
619
620 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
621 {
622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
623 struct drm_i915_private *dev_priv = dev->dev_private;
624
625 lockdep_assert_held(&dev_priv->pps_mutex);
626
627 if (IS_VALLEYVIEW(dev) &&
628 intel_dp->pps_pipe == INVALID_PIPE)
629 return false;
630
631 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
632 }
633
634 static void
635 intel_dp_check_edp(struct intel_dp *intel_dp)
636 {
637 struct drm_device *dev = intel_dp_to_dev(intel_dp);
638 struct drm_i915_private *dev_priv = dev->dev_private;
639
640 if (!is_edp(intel_dp))
641 return;
642
643 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
644 WARN(1, "eDP powered off while attempting aux channel communication.\n");
645 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
646 I915_READ(_pp_stat_reg(intel_dp)),
647 I915_READ(_pp_ctrl_reg(intel_dp)));
648 }
649 }
650
651 static uint32_t
652 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
653 {
654 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
655 struct drm_device *dev = intel_dig_port->base.base.dev;
656 struct drm_i915_private *dev_priv = dev->dev_private;
657 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
658 uint32_t status;
659 bool done;
660
661 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
662 if (has_aux_irq)
663 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
664 msecs_to_jiffies_timeout(10));
665 else
666 done = wait_for_atomic(C, 10) == 0;
667 if (!done)
668 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
669 has_aux_irq);
670 #undef C
671
672 return status;
673 }
674
675 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
676 {
677 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
678 struct drm_device *dev = intel_dig_port->base.base.dev;
679
680 /*
681 * The clock divider is based on hrawclk, and we would like it to run at
682 * 2MHz. So take the hrawclk value, divide by 2, and use that.
683 */
684 return index ? 0 : intel_hrawclk(dev) / 2;
685 }
686
687 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
688 {
689 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
690 struct drm_device *dev = intel_dig_port->base.base.dev;
691 struct drm_i915_private *dev_priv = dev->dev_private;
692
693 if (index)
694 return 0;
695
696 if (intel_dig_port->port == PORT_A) {
697 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
698
699 } else {
700 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
701 }
702 }
703
704 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
705 {
706 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
707 struct drm_device *dev = intel_dig_port->base.base.dev;
708 struct drm_i915_private *dev_priv = dev->dev_private;
709
710 if (intel_dig_port->port == PORT_A) {
711 if (index)
712 return 0;
713 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
714 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
715 /* Workaround for non-ULT HSW */
716 switch (index) {
717 case 0: return 63;
718 case 1: return 72;
719 default: return 0;
720 }
721 } else {
722 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
723 }
724 }
725
726 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
727 {
728 return index ? 0 : 100;
729 }
730
731 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
732 {
733 /*
734 * SKL doesn't need us to program the AUX clock divider (the hardware
735 * derives the clock from CDCLK automatically). We still implement the
736 * get_aux_clock_divider vfunc to plug into the existing code.
737 */
738 return index ? 0 : 1;
739 }
740
741 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
742 bool has_aux_irq,
743 int send_bytes,
744 uint32_t aux_clock_divider)
745 {
746 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
747 struct drm_device *dev = intel_dig_port->base.base.dev;
748 uint32_t precharge, timeout;
749
750 if (IS_GEN6(dev))
751 precharge = 3;
752 else
753 precharge = 5;
754
755 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
756 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
757 else
758 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
759
760 return DP_AUX_CH_CTL_SEND_BUSY |
761 DP_AUX_CH_CTL_DONE |
762 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
763 DP_AUX_CH_CTL_TIME_OUT_ERROR |
764 timeout |
765 DP_AUX_CH_CTL_RECEIVE_ERROR |
766 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
767 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
768 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
769 }
770
771 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
772 bool has_aux_irq,
773 int send_bytes,
774 uint32_t unused)
775 {
776 return DP_AUX_CH_CTL_SEND_BUSY |
777 DP_AUX_CH_CTL_DONE |
778 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
779 DP_AUX_CH_CTL_TIME_OUT_ERROR |
780 DP_AUX_CH_CTL_TIME_OUT_1600us |
781 DP_AUX_CH_CTL_RECEIVE_ERROR |
782 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
783 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
784 }
785
786 static int
787 intel_dp_aux_ch(struct intel_dp *intel_dp,
788 const uint8_t *send, int send_bytes,
789 uint8_t *recv, int recv_size)
790 {
791 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
792 struct drm_device *dev = intel_dig_port->base.base.dev;
793 struct drm_i915_private *dev_priv = dev->dev_private;
794 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
795 uint32_t aux_clock_divider;
796 int i, ret, recv_bytes;
797 uint32_t status;
798 int try, clock = 0;
799 bool has_aux_irq = HAS_AUX_IRQ(dev);
800 bool vdd;
801
802 pps_lock(intel_dp);
803
804 /*
805 * We will be called with VDD already enabled for dpcd/edid/oui reads.
806 * In such cases we want to leave VDD enabled and it's up to upper layers
807 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
808 * ourselves.
809 */
810 vdd = edp_panel_vdd_on(intel_dp);
811
812 /* DP AUX is extremely sensitive to irq latency, hence request the
813 * lowest possible wakeup latency to prevent the cpu from going into
814 * deep sleep states.
815 */
816 pm_qos_update_request(&dev_priv->pm_qos, 0);
817
818 intel_dp_check_edp(intel_dp);
819
820 /* Try to wait for any previous AUX channel activity */
821 for (try = 0; try < 3; try++) {
822 status = I915_READ_NOTRACE(ch_ctl);
823 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
824 break;
825 msleep(1);
826 }
827
828 if (try == 3) {
829 static u32 last_status = -1;
830 const u32 status = I915_READ(ch_ctl);
831
832 if (status != last_status) {
833 WARN(1, "dp_aux_ch not started status 0x%08x\n",
834 status);
835 last_status = status;
836 }
837
838 ret = -EBUSY;
839 goto out;
840 }
841
842 /* Only 5 data registers! */
843 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
844 ret = -E2BIG;
845 goto out;
846 }
847
848 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
849 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
850 has_aux_irq,
851 send_bytes,
852 aux_clock_divider);
853
854 /* Must try at least 3 times according to DP spec */
855 for (try = 0; try < 5; try++) {
856 /* Load the send data into the aux channel data registers */
857 for (i = 0; i < send_bytes; i += 4)
858 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
859 intel_dp_pack_aux(send + i,
860 send_bytes - i));
861
862 /* Send the command and wait for it to complete */
863 I915_WRITE(ch_ctl, send_ctl);
864
865 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
866
867 /* Clear done status and any errors */
868 I915_WRITE(ch_ctl,
869 status |
870 DP_AUX_CH_CTL_DONE |
871 DP_AUX_CH_CTL_TIME_OUT_ERROR |
872 DP_AUX_CH_CTL_RECEIVE_ERROR);
873
874 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
875 continue;
876
877 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
878 * A 400us delay is required after errors and timeouts.
879 * Timeout errors from the HW already meet this
880 * requirement, so skip to the next iteration.
881 */
882 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
883 usleep_range(400, 500);
884 continue;
885 }
886 if (status & DP_AUX_CH_CTL_DONE)
887 goto done;
888 }
889 }
890
891 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
892 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
893 ret = -EBUSY;
894 goto out;
895 }
896
897 done:
898 /* Check for timeout or receive error.
899 * Timeouts occur when the sink is not connected
900 */
901 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
902 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
903 ret = -EIO;
904 goto out;
905 }
906
907 /* Timeouts occur when the device isn't connected, so they're
908 * "normal" -- don't fill the kernel log with these */
909 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
910 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
911 ret = -ETIMEDOUT;
912 goto out;
913 }
914
915 /* Unload any bytes sent back from the other side */
916 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
917 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
918 if (recv_bytes > recv_size)
919 recv_bytes = recv_size;
920
921 for (i = 0; i < recv_bytes; i += 4)
922 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
923 recv + i, recv_bytes - i);
924
925 ret = recv_bytes;
926 out:
927 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
928
929 if (vdd)
930 edp_panel_vdd_off(intel_dp, false);
931
932 pps_unlock(intel_dp);
933
934 return ret;
935 }
936
937 #define BARE_ADDRESS_SIZE 3
938 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
939 static ssize_t
940 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
941 {
942 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
943 uint8_t txbuf[20], rxbuf[20];
944 size_t txsize, rxsize;
945 int ret;
946
947 txbuf[0] = (msg->request << 4) |
948 ((msg->address >> 16) & 0xf);
949 txbuf[1] = (msg->address >> 8) & 0xff;
950 txbuf[2] = msg->address & 0xff;
951 txbuf[3] = msg->size - 1;
952
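/*
 * Header layout example (illustrative): a native DPCD read of 16
 * bytes at address 0x00000 (msg->request == DP_AUX_NATIVE_READ == 0x9)
 * yields txbuf[] = { 0x90, 0x00, 0x00, 0x0f }, i.e. the request in
 * the top nibble, a 20-bit address, and size - 1 in the last byte.
 */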
953 switch (msg->request & ~DP_AUX_I2C_MOT) {
954 case DP_AUX_NATIVE_WRITE:
955 case DP_AUX_I2C_WRITE:
956 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
957 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
958 rxsize = 2; /* 0 or 1 data bytes */
959
960 if (WARN_ON(txsize > 20))
961 return -E2BIG;
962
963 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
964
965 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
966 if (ret > 0) {
967 msg->reply = rxbuf[0] >> 4;
968
969 if (ret > 1) {
970 /* Number of bytes written in a short write. */
971 ret = clamp_t(int, rxbuf[1], 0, msg->size);
972 } else {
973 /* Return payload size. */
974 ret = msg->size;
975 }
976 }
977 break;
978
979 case DP_AUX_NATIVE_READ:
980 case DP_AUX_I2C_READ:
981 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
982 rxsize = msg->size + 1;
983
984 if (WARN_ON(rxsize > 20))
985 return -E2BIG;
986
987 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
988 if (ret > 0) {
989 msg->reply = rxbuf[0] >> 4;
990 /*
991 * Assume the happy path, and copy the data. The caller is
992 * expected to check msg->reply before touching it.
993 *
994 * Return payload size.
995 */
996 ret--;
997 memcpy(msg->buffer, rxbuf + 1, ret);
998 }
999 break;
1000
1001 default:
1002 ret = -EINVAL;
1003 break;
1004 }
1005
1006 return ret;
1007 }
1008
1009 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1010 enum port port)
1011 {
1012 switch (port) {
1013 case PORT_B:
1014 case PORT_C:
1015 case PORT_D:
1016 return DP_AUX_CH_CTL(port);
1017 default:
1018 MISSING_CASE(port);
1019 return DP_AUX_CH_CTL(PORT_B);
1020 }
1021 }
1022
1023 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1024 enum port port, int index)
1025 {
1026 switch (port) {
1027 case PORT_B:
1028 case PORT_C:
1029 case PORT_D:
1030 return DP_AUX_CH_DATA(port, index);
1031 default:
1032 MISSING_CASE(port);
1033 return DP_AUX_CH_DATA(PORT_B, index);
1034 }
1035 }
1036
1037 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1038 enum port port)
1039 {
1040 switch (port) {
1041 case PORT_A:
1042 return DP_AUX_CH_CTL(port);
1043 case PORT_B:
1044 case PORT_C:
1045 case PORT_D:
1046 return PCH_DP_AUX_CH_CTL(port);
1047 default:
1048 MISSING_CASE(port);
1049 return DP_AUX_CH_CTL(PORT_A);
1050 }
1051 }
1052
1053 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1054 enum port port, int index)
1055 {
1056 switch (port) {
1057 case PORT_A:
1058 return DP_AUX_CH_DATA(port, index);
1059 case PORT_B:
1060 case PORT_C:
1061 case PORT_D:
1062 return PCH_DP_AUX_CH_DATA(port, index);
1063 default:
1064 MISSING_CASE(port);
1065 return DP_AUX_CH_DATA(PORT_A, index);
1066 }
1067 }
1068
1069 /*
1070 * On SKL we don't have AUX for port E, so we rely
1071 * on the VBT to set a proper alternate AUX channel.
1072 */
1073 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1074 {
1075 const struct ddi_vbt_port_info *info =
1076 &dev_priv->vbt.ddi_port_info[PORT_E];
1077
1078 switch (info->alternate_aux_channel) {
1079 case DP_AUX_A:
1080 return PORT_A;
1081 case DP_AUX_B:
1082 return PORT_B;
1083 case DP_AUX_C:
1084 return PORT_C;
1085 case DP_AUX_D:
1086 return PORT_D;
1087 default:
1088 MISSING_CASE(info->alternate_aux_channel);
1089 return PORT_A;
1090 }
1091 }
1092
1093 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1094 enum port port)
1095 {
1096 if (port == PORT_E)
1097 port = skl_porte_aux_port(dev_priv);
1098
1099 switch (port) {
1100 case PORT_A:
1101 case PORT_B:
1102 case PORT_C:
1103 case PORT_D:
1104 return DP_AUX_CH_CTL(port);
1105 default:
1106 MISSING_CASE(port);
1107 return DP_AUX_CH_CTL(PORT_A);
1108 }
1109 }
1110
1111 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1112 enum port port, int index)
1113 {
1114 if (port == PORT_E)
1115 port = skl_porte_aux_port(dev_priv);
1116
1117 switch (port) {
1118 case PORT_A:
1119 case PORT_B:
1120 case PORT_C:
1121 case PORT_D:
1122 return DP_AUX_CH_DATA(port, index);
1123 default:
1124 MISSING_CASE(port);
1125 return DP_AUX_CH_DATA(PORT_A, index);
1126 }
1127 }
1128
1129 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1130 enum port port)
1131 {
1132 if (INTEL_INFO(dev_priv)->gen >= 9)
1133 return skl_aux_ctl_reg(dev_priv, port);
1134 else if (HAS_PCH_SPLIT(dev_priv))
1135 return ilk_aux_ctl_reg(dev_priv, port);
1136 else
1137 return g4x_aux_ctl_reg(dev_priv, port);
1138 }
1139
1140 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1141 enum port port, int index)
1142 {
1143 if (INTEL_INFO(dev_priv)->gen >= 9)
1144 return skl_aux_data_reg(dev_priv, port, index);
1145 else if (HAS_PCH_SPLIT(dev_priv))
1146 return ilk_aux_data_reg(dev_priv, port, index);
1147 else
1148 return g4x_aux_data_reg(dev_priv, port, index);
1149 }
1150
1151 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1152 {
1153 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1154 enum port port = dp_to_dig_port(intel_dp)->port;
1155 int i;
1156
1157 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1158 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1159 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1160 }
1161
1162 static void
1163 intel_dp_aux_fini(struct intel_dp *intel_dp)
1164 {
1165 drm_dp_aux_unregister(&intel_dp->aux);
1166 kfree(intel_dp->aux.name);
1167 }
1168
1169 static int
1170 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1171 {
1172 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1173 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1174 enum port port = intel_dig_port->port;
1175 int ret;
1176
1177 intel_aux_reg_init(intel_dp);
1178
1179 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1180 if (!intel_dp->aux.name)
1181 return -ENOMEM;
1182
1183 intel_dp->aux.dev = dev->dev;
1184 intel_dp->aux.transfer = intel_dp_aux_transfer;
1185
1186 DRM_DEBUG_KMS("registering %s bus for %s\n",
1187 intel_dp->aux.name,
1188 connector->base.kdev->kobj.name);
1189
1190 ret = drm_dp_aux_register(&intel_dp->aux);
1191 if (ret < 0) {
1192 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1193 intel_dp->aux.name, ret);
1194 kfree(intel_dp->aux.name);
1195 return ret;
1196 }
1197
1198 ret = sysfs_create_link(&connector->base.kdev->kobj,
1199 &intel_dp->aux.ddc.dev.kobj,
1200 intel_dp->aux.ddc.dev.kobj.name);
1201 if (ret < 0) {
1202 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1203 intel_dp->aux.name, ret);
1204 intel_dp_aux_fini(intel_dp);
1205 return ret;
1206 }
1207
1208 return 0;
1209 }
1210
1211 static void
1212 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1213 {
1214 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1215
1216 if (!intel_connector->mst_port)
1217 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1218 intel_dp->aux.ddc.dev.kobj.name);
1219 intel_connector_unregister(intel_connector);
1220 }
1221
1222 static void
1223 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1224 {
1225 u32 ctrl1;
1226
1227 memset(&pipe_config->dpll_hw_state, 0,
1228 sizeof(pipe_config->dpll_hw_state));
1229
1230 pipe_config->ddi_pll_sel = SKL_DPLL0;
1231 pipe_config->dpll_hw_state.cfgcr1 = 0;
1232 pipe_config->dpll_hw_state.cfgcr2 = 0;
1233
1234 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1235 switch (pipe_config->port_clock / 2) {
1236 case 81000:
1237 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1238 SKL_DPLL0);
1239 break;
1240 case 135000:
1241 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1242 SKL_DPLL0);
1243 break;
1244 case 270000:
1245 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1246 SKL_DPLL0);
1247 break;
1248 case 162000:
1249 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1250 SKL_DPLL0);
1251 break;
1252 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1253 results in a CDCLK change. The CDCLK change needs to be handled by
1254 disabling the pipes and re-enabling them. */
1255 case 108000:
1256 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1257 SKL_DPLL0);
1258 break;
1259 case 216000:
1260 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1261 SKL_DPLL0);
1262 break;
1263
1264 }
1265 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1266 }
1267
1268 void
1269 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1270 {
1271 memset(&pipe_config->dpll_hw_state, 0,
1272 sizeof(pipe_config->dpll_hw_state));
1273
1274 switch (pipe_config->port_clock / 2) {
1275 case 81000:
1276 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1277 break;
1278 case 135000:
1279 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1280 break;
1281 case 270000:
1282 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1283 break;
1284 }
1285 }
1286
1287 static int
1288 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1289 {
1290 if (intel_dp->num_sink_rates) {
1291 *sink_rates = intel_dp->sink_rates;
1292 return intel_dp->num_sink_rates;
1293 }
1294
1295 *sink_rates = default_rates;
1296
1297 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1298 }
1299
1300 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1301 {
1302 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1303 struct drm_device *dev = dig_port->base.base.dev;
1304
1305 /* WaDisableHBR2:skl */
1306 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1307 return false;
1308
1309 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1310 (INTEL_INFO(dev)->gen >= 9))
1311 return true;
1312 else
1313 return false;
1314 }
1315
1316 static int
1317 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1318 {
1319 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1320 struct drm_device *dev = dig_port->base.base.dev;
1321 int size;
1322
1323 if (IS_BROXTON(dev)) {
1324 *source_rates = bxt_rates;
1325 size = ARRAY_SIZE(bxt_rates);
1326 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1327 *source_rates = skl_rates;
1328 size = ARRAY_SIZE(skl_rates);
1329 } else {
1330 *source_rates = default_rates;
1331 size = ARRAY_SIZE(default_rates);
1332 }
1333
1334 /* This depends on the fact that 5.4 is the last value in the array */
1335 if (!intel_dp_source_supports_hbr2(intel_dp))
1336 size--;
1337
1338 return size;
1339 }
1340
1341 static void
1342 intel_dp_set_clock(struct intel_encoder *encoder,
1343 struct intel_crtc_state *pipe_config)
1344 {
1345 struct drm_device *dev = encoder->base.dev;
1346 const struct dp_link_dpll *divisor = NULL;
1347 int i, count = 0;
1348
1349 if (IS_G4X(dev)) {
1350 divisor = gen4_dpll;
1351 count = ARRAY_SIZE(gen4_dpll);
1352 } else if (HAS_PCH_SPLIT(dev)) {
1353 divisor = pch_dpll;
1354 count = ARRAY_SIZE(pch_dpll);
1355 } else if (IS_CHERRYVIEW(dev)) {
1356 divisor = chv_dpll;
1357 count = ARRAY_SIZE(chv_dpll);
1358 } else if (IS_VALLEYVIEW(dev)) {
1359 divisor = vlv_dpll;
1360 count = ARRAY_SIZE(vlv_dpll);
1361 }
1362
1363 if (divisor && count) {
1364 for (i = 0; i < count; i++) {
1365 if (pipe_config->port_clock == divisor[i].clock) {
1366 pipe_config->dpll = divisor[i].dpll;
1367 pipe_config->clock_set = true;
1368 break;
1369 }
1370 }
1371 }
1372 }
1373
1374 static int intersect_rates(const int *source_rates, int source_len,
1375 const int *sink_rates, int sink_len,
1376 int *common_rates)
1377 {
1378 int i = 0, j = 0, k = 0;
1379
1380 while (i < source_len && j < sink_len) {
1381 if (source_rates[i] == sink_rates[j]) {
1382 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1383 return k;
1384 common_rates[k] = source_rates[i];
1385 ++k;
1386 ++i;
1387 ++j;
1388 } else if (source_rates[i] < sink_rates[j]) {
1389 ++i;
1390 } else {
1391 ++j;
1392 }
1393 }
1394 return k;
1395 }
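/*
 * Example (illustrative): intersecting the sorted arrays
 * { 162000, 270000, 540000 } and { 162000, 216000, 270000 } yields
 * common_rates[] = { 162000, 270000 } and a return value of 2. The
 * two-pointer walk above relies on both arrays being sorted in
 * ascending order.
 */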
1396
1397 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1398 int *common_rates)
1399 {
1400 const int *source_rates, *sink_rates;
1401 int source_len, sink_len;
1402
1403 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1404 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1405
1406 return intersect_rates(source_rates, source_len,
1407 sink_rates, sink_len,
1408 common_rates);
1409 }
1410
1411 static void snprintf_int_array(char *str, size_t len,
1412 const int *array, int nelem)
1413 {
1414 int i;
1415
1416 str[0] = '\0';
1417
1418 for (i = 0; i < nelem; i++) {
1419 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1420 if (r >= len)
1421 return;
1422 str += r;
1423 len -= r;
1424 }
1425 }
1426
1427 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1428 {
1429 const int *source_rates, *sink_rates;
1430 int source_len, sink_len, common_len;
1431 int common_rates[DP_MAX_SUPPORTED_RATES];
1432 char str[128]; /* FIXME: too big for stack? */
1433
1434 if ((drm_debug & DRM_UT_KMS) == 0)
1435 return;
1436
1437 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1438 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1439 DRM_DEBUG_KMS("source rates: %s\n", str);
1440
1441 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1442 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1443 DRM_DEBUG_KMS("sink rates: %s\n", str);
1444
1445 common_len = intel_dp_common_rates(intel_dp, common_rates);
1446 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1447 DRM_DEBUG_KMS("common rates: %s\n", str);
1448 }
1449
1450 static int rate_to_index(int find, const int *rates)
1451 {
1452 int i = 0;
1453
1454 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1455 if (find == rates[i])
1456 break;
1457
1458 return i;
1459 }
1460
1461 int
1462 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1463 {
1464 int rates[DP_MAX_SUPPORTED_RATES] = {};
1465 int len;
1466
1467 len = intel_dp_common_rates(intel_dp, rates);
1468 if (WARN_ON(len <= 0))
1469 return 162000;
1470
1471 return rates[rate_to_index(0, rates) - 1];
1472 }
1473
1474 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1475 {
1476 return rate_to_index(rate, intel_dp->sink_rates);
1477 }
1478
1479 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1480 uint8_t *link_bw, uint8_t *rate_select)
1481 {
1482 if (intel_dp->num_sink_rates) {
1483 *link_bw = 0;
1484 *rate_select =
1485 intel_dp_rate_select(intel_dp, port_clock);
1486 } else {
1487 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1488 *rate_select = 0;
1489 }
1490 }
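/*
 * Example (illustrative): a sink without a DPCD rate table
 * (num_sink_rates == 0) driven at 270000 kHz gets link_bw ==
 * DP_LINK_BW_2_7 (0x0a) and rate_select == 0, while a sink with a
 * rate table gets link_bw == 0 and rate_select set to the index of
 * 270000 in its sink_rates[] array.
 */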
1491
1492 bool
1493 intel_dp_compute_config(struct intel_encoder *encoder,
1494 struct intel_crtc_state *pipe_config)
1495 {
1496 struct drm_device *dev = encoder->base.dev;
1497 struct drm_i915_private *dev_priv = dev->dev_private;
1498 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1499 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1500 enum port port = dp_to_dig_port(intel_dp)->port;
1501 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1502 struct intel_connector *intel_connector = intel_dp->attached_connector;
1503 int lane_count, clock;
1504 int min_lane_count = 1;
1505 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1506 /* Conveniently, the link BW constants become indices with a shift... */
1507 int min_clock = 0;
1508 int max_clock;
1509 int bpp, mode_rate;
1510 int link_avail, link_clock;
1511 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1512 int common_len;
1513 uint8_t link_bw, rate_select;
1514
1515 common_len = intel_dp_common_rates(intel_dp, common_rates);
1516
1517 /* No common link rates between source and sink */
1518 WARN_ON(common_len <= 0);
1519
1520 max_clock = common_len - 1;
1521
1522 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1523 pipe_config->has_pch_encoder = true;
1524
1525 pipe_config->has_dp_encoder = true;
1526 pipe_config->has_drrs = false;
1527 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1528
1529 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1530 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1531 adjusted_mode);
1532
1533 if (INTEL_INFO(dev)->gen >= 9) {
1534 int ret;
1535 ret = skl_update_scaler_crtc(pipe_config);
1536 if (ret)
1537 return false;
1538 }
1539
1540 if (HAS_GMCH_DISPLAY(dev))
1541 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1542 intel_connector->panel.fitting_mode);
1543 else
1544 intel_pch_panel_fitting(intel_crtc, pipe_config,
1545 intel_connector->panel.fitting_mode);
1546 }
1547
1548 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1549 return false;
1550
1551 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1552 "max bw %d pixel clock %iKHz\n",
1553 max_lane_count, common_rates[max_clock],
1554 adjusted_mode->crtc_clock);
1555
1556 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1557 * bpc in between. */
1558 bpp = pipe_config->pipe_bpp;
1559 if (is_edp(intel_dp)) {
1560
1561 /* Get bpp from the VBT only for panels that don't have bpp in the EDID */
1562 if (intel_connector->base.display_info.bpc == 0 &&
1563 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1564 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1565 dev_priv->vbt.edp_bpp);
1566 bpp = dev_priv->vbt.edp_bpp;
1567 }
1568
1569 /*
1570 * Use the maximum clock and number of lanes the eDP panel
1571 * advertises being capable of. The panels are generally
1572 * designed to support only a single clock and lane
1573 * configuration, and typically these values correspond to the
1574 * native resolution of the panel.
1575 */
1576 min_lane_count = max_lane_count;
1577 min_clock = max_clock;
1578 }
1579
1580 for (; bpp >= 6*3; bpp -= 2*3) {
1581 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1582 bpp);
1583
1584 for (clock = min_clock; clock <= max_clock; clock++) {
1585 for (lane_count = min_lane_count;
1586 lane_count <= max_lane_count;
1587 lane_count <<= 1) {
1588
1589 link_clock = common_rates[clock];
1590 link_avail = intel_dp_max_data_rate(link_clock,
1591 lane_count);
1592
1593 if (mode_rate <= link_avail) {
1594 goto found;
1595 }
1596 }
1597 }
1598 }
1599
1600 return false;
1601
1602 found:
1603 if (intel_dp->color_range_auto) {
1604 /*
1605 * See:
1606 * CEA-861-E - 5.1 Default Encoding Parameters
1607 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1608 */
1609 pipe_config->limited_color_range =
1610 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1611 } else {
1612 pipe_config->limited_color_range =
1613 intel_dp->limited_color_range;
1614 }
1615
1616 pipe_config->lane_count = lane_count;
1617
1618 pipe_config->pipe_bpp = bpp;
1619 pipe_config->port_clock = common_rates[clock];
1620
1621 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1622 &link_bw, &rate_select);
1623
1624 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1625 link_bw, rate_select, pipe_config->lane_count,
1626 pipe_config->port_clock, bpp);
1627 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1628 mode_rate, link_avail);
1629
1630 intel_link_compute_m_n(bpp, lane_count,
1631 adjusted_mode->crtc_clock,
1632 pipe_config->port_clock,
1633 &pipe_config->dp_m_n);
1634
1635 if (intel_connector->panel.downclock_mode != NULL &&
1636 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1637 pipe_config->has_drrs = true;
1638 intel_link_compute_m_n(bpp, lane_count,
1639 intel_connector->panel.downclock_mode->clock,
1640 pipe_config->port_clock,
1641 &pipe_config->dp_m2_n2);
1642 }
1643
1644 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1645 skl_edp_set_pll_config(pipe_config);
1646 else if (IS_BROXTON(dev))
1647 /* handled in ddi */;
1648 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1649 hsw_dp_set_ddi_pll_sel(pipe_config);
1650 else
1651 intel_dp_set_clock(encoder, pipe_config);
1652
1653 return true;
1654 }
1655
1656 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1657 const struct intel_crtc_state *pipe_config)
1658 {
1659 intel_dp->link_rate = pipe_config->port_clock;
1660 intel_dp->lane_count = pipe_config->lane_count;
1661 }
1662
1663 static void intel_dp_prepare(struct intel_encoder *encoder)
1664 {
1665 struct drm_device *dev = encoder->base.dev;
1666 struct drm_i915_private *dev_priv = dev->dev_private;
1667 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1668 enum port port = dp_to_dig_port(intel_dp)->port;
1669 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1670 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1671
1672 intel_dp_set_link_params(intel_dp, crtc->config);
1673
1674 /*
1675 * There are four kinds of DP registers:
1676 *
1677 * IBX PCH
1678 * SNB CPU
1679 * IVB CPU
1680 * CPT PCH
1681 *
1682 * IBX PCH and CPU are the same for almost everything,
1683 * except that the CPU DP PLL is configured in this
1684 * register
1685 *
1686 * CPT PCH is quite different, having many bits moved
1687 * to the TRANS_DP_CTL register instead. That
1688 * configuration happens (oddly) in ironlake_pch_enable
1689 */
1690
1691 /* Preserve the BIOS-computed detected bit. This is
1692 * supposed to be read-only.
1693 */
1694 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1695
1696 /* Handle DP bits in common between all three register formats */
1697 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1698 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1699
1700 /* Split out the IBX/CPU vs CPT settings */
1701
1702 if (IS_GEN7(dev) && port == PORT_A) {
1703 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1704 intel_dp->DP |= DP_SYNC_HS_HIGH;
1705 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1706 intel_dp->DP |= DP_SYNC_VS_HIGH;
1707 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1708
1709 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1710 intel_dp->DP |= DP_ENHANCED_FRAMING;
1711
1712 intel_dp->DP |= crtc->pipe << 29;
1713 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1714 u32 trans_dp;
1715
1716 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1717
1718 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1719 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1720 trans_dp |= TRANS_DP_ENH_FRAMING;
1721 else
1722 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1723 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1724 } else {
1725 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1726 crtc->config->limited_color_range)
1727 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1728
1729 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1730 intel_dp->DP |= DP_SYNC_HS_HIGH;
1731 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1732 intel_dp->DP |= DP_SYNC_VS_HIGH;
1733 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1734
1735 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1736 intel_dp->DP |= DP_ENHANCED_FRAMING;
1737
1738 if (IS_CHERRYVIEW(dev))
1739 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1740 else if (crtc->pipe == PIPE_B)
1741 intel_dp->DP |= DP_PIPEB_SELECT;
1742 }
1743 }
1744
1745 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1746 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1747
1748 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1749 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1750
1751 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1752 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1753
1754 static void wait_panel_status(struct intel_dp *intel_dp,
1755 u32 mask,
1756 u32 value)
1757 {
1758 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1759 struct drm_i915_private *dev_priv = dev->dev_private;
1760 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1761
1762 lockdep_assert_held(&dev_priv->pps_mutex);
1763
1764 pp_stat_reg = _pp_stat_reg(intel_dp);
1765 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1766
1767 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1768 mask, value,
1769 I915_READ(pp_stat_reg),
1770 I915_READ(pp_ctrl_reg));
1771
1772 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1773 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1774 I915_READ(pp_stat_reg),
1775 I915_READ(pp_ctrl_reg));
1776 }
1777
1778 DRM_DEBUG_KMS("Wait complete\n");
1779 }
1780
1781 static void wait_panel_on(struct intel_dp *intel_dp)
1782 {
1783 DRM_DEBUG_KMS("Wait for panel power on\n");
1784 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1785 }
1786
1787 static void wait_panel_off(struct intel_dp *intel_dp)
1788 {
1789 DRM_DEBUG_KMS("Wait for panel power off time\n");
1790 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1791 }
1792
1793 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1794 {
1795 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1796
1797 /* When the VDD override bit is the last thing we disable, we have to do
1798 * the manual wait ourselves. */
1799 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1800 intel_dp->panel_power_cycle_delay);
1801
1802 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1803 }
1804
1805 static void wait_backlight_on(struct intel_dp *intel_dp)
1806 {
1807 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1808 intel_dp->backlight_on_delay);
1809 }
1810
1811 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1812 {
1813 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1814 intel_dp->backlight_off_delay);
1815 }
1816
1817 /* Read the current pp_control value, unlocking the register if it
1818 * is locked
1819 */
1820
1821 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1822 {
1823 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1824 struct drm_i915_private *dev_priv = dev->dev_private;
1825 u32 control;
1826
1827 lockdep_assert_held(&dev_priv->pps_mutex);
1828
1829 control = I915_READ(_pp_ctrl_reg(intel_dp));
1830 if (!IS_BROXTON(dev)) {
1831 control &= ~PANEL_UNLOCK_MASK;
1832 control |= PANEL_UNLOCK_REGS;
1833 }
1834 return control;
1835 }
1836
1837 /*
1838 * Must be paired with edp_panel_vdd_off().
1839 * Must hold pps_mutex around the whole on/off sequence.
1840 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1841 */
1842 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1843 {
1844 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1845 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1846 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1847 struct drm_i915_private *dev_priv = dev->dev_private;
1848 enum intel_display_power_domain power_domain;
1849 u32 pp;
1850 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1851 bool need_to_disable = !intel_dp->want_panel_vdd;
1852
1853 lockdep_assert_held(&dev_priv->pps_mutex);
1854
1855 if (!is_edp(intel_dp))
1856 return false;
1857
1858 cancel_delayed_work(&intel_dp->panel_vdd_work);
1859 intel_dp->want_panel_vdd = true;
1860
1861 if (edp_have_panel_vdd(intel_dp))
1862 return need_to_disable;
1863
1864 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1865 intel_display_power_get(dev_priv, power_domain);
1866
1867 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1868 port_name(intel_dig_port->port));
1869
1870 if (!edp_have_panel_power(intel_dp))
1871 wait_panel_power_cycle(intel_dp);
1872
1873 pp = ironlake_get_pp_control(intel_dp);
1874 pp |= EDP_FORCE_VDD;
1875
1876 pp_stat_reg = _pp_stat_reg(intel_dp);
1877 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1878
1879 I915_WRITE(pp_ctrl_reg, pp);
1880 POSTING_READ(pp_ctrl_reg);
1881 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1882 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1883 /*
1884 * If the panel wasn't on, delay before accessing aux channel
1885 */
1886 if (!edp_have_panel_power(intel_dp)) {
1887 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1888 port_name(intel_dig_port->port));
1889 msleep(intel_dp->panel_power_up_delay);
1890 }
1891
1892 return need_to_disable;
1893 }
1894
1895 /*
1896 * Must be paired with intel_edp_panel_vdd_off() or
1897 * intel_edp_panel_off().
1898 * Nested calls to these functions are not allowed since
1899 * we drop the lock. Caller must use some higher level
1900 * locking to prevent nested calls from other threads.
1901 */
1902 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1903 {
1904 bool vdd;
1905
1906 if (!is_edp(intel_dp))
1907 return;
1908
1909 pps_lock(intel_dp);
1910 vdd = edp_panel_vdd_on(intel_dp);
1911 pps_unlock(intel_dp);
1912
1913 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1914 port_name(dp_to_dig_port(intel_dp)->port));
1915 }
1916
1917 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1918 {
1919 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1920 struct drm_i915_private *dev_priv = dev->dev_private;
1921 struct intel_digital_port *intel_dig_port =
1922 dp_to_dig_port(intel_dp);
1923 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1924 enum intel_display_power_domain power_domain;
1925 u32 pp;
1926 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1927
1928 lockdep_assert_held(&dev_priv->pps_mutex);
1929
1930 WARN_ON(intel_dp->want_panel_vdd);
1931
1932 if (!edp_have_panel_vdd(intel_dp))
1933 return;
1934
1935 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1936 port_name(intel_dig_port->port));
1937
1938 pp = ironlake_get_pp_control(intel_dp);
1939 pp &= ~EDP_FORCE_VDD;
1940
1941 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1942 pp_stat_reg = _pp_stat_reg(intel_dp);
1943
1944 I915_WRITE(pp_ctrl_reg, pp);
1945 POSTING_READ(pp_ctrl_reg);
1946
1947 /* Make sure sequencer is idle before allowing subsequent activity */
1948 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1949 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1950
1951 if ((pp & POWER_TARGET_ON) == 0)
1952 intel_dp->last_power_cycle = jiffies;
1953
1954 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1955 intel_display_power_put(dev_priv, power_domain);
1956 }
1957
1958 static void edp_panel_vdd_work(struct work_struct *__work)
1959 {
1960 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1961 struct intel_dp, panel_vdd_work);
1962
1963 pps_lock(intel_dp);
1964 if (!intel_dp->want_panel_vdd)
1965 edp_panel_vdd_off_sync(intel_dp);
1966 pps_unlock(intel_dp);
1967 }
1968
1969 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1970 {
1971 unsigned long delay;
1972
1973 /*
1974 * Queue the timer to fire a long time from now (relative to the power
1975 * down delay) to keep the panel power up across a sequence of
1976 * operations.
1977 */
1978 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1979 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1980 }
1981
1982 /*
1983 * Must be paired with edp_panel_vdd_on().
1984 * Must hold pps_mutex around the whole on/off sequence.
1985 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1986 */
1987 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1988 {
1989 struct drm_i915_private *dev_priv =
1990 intel_dp_to_dev(intel_dp)->dev_private;
1991
1992 lockdep_assert_held(&dev_priv->pps_mutex);
1993
1994 if (!is_edp(intel_dp))
1995 return;
1996
1997 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1998 port_name(dp_to_dig_port(intel_dp)->port));
1999
2000 intel_dp->want_panel_vdd = false;
2001
2002 if (sync)
2003 edp_panel_vdd_off_sync(intel_dp);
2004 else
2005 edp_panel_vdd_schedule_off(intel_dp);
2006 }
2007
2008 static void edp_panel_on(struct intel_dp *intel_dp)
2009 {
2010 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2011 struct drm_i915_private *dev_priv = dev->dev_private;
2012 u32 pp;
2013 i915_reg_t pp_ctrl_reg;
2014
2015 lockdep_assert_held(&dev_priv->pps_mutex);
2016
2017 if (!is_edp(intel_dp))
2018 return;
2019
2020 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2021 port_name(dp_to_dig_port(intel_dp)->port));
2022
2023 if (WARN(edp_have_panel_power(intel_dp),
2024 "eDP port %c panel power already on\n",
2025 port_name(dp_to_dig_port(intel_dp)->port)))
2026 return;
2027
2028 wait_panel_power_cycle(intel_dp);
2029
2030 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2031 pp = ironlake_get_pp_control(intel_dp);
2032 if (IS_GEN5(dev)) {
2033 /* ILK workaround: disable reset around power sequence */
2034 pp &= ~PANEL_POWER_RESET;
2035 I915_WRITE(pp_ctrl_reg, pp);
2036 POSTING_READ(pp_ctrl_reg);
2037 }
2038
2039 pp |= POWER_TARGET_ON;
2040 if (!IS_GEN5(dev))
2041 pp |= PANEL_POWER_RESET;
2042
2043 I915_WRITE(pp_ctrl_reg, pp);
2044 POSTING_READ(pp_ctrl_reg);
2045
2046 wait_panel_on(intel_dp);
2047 intel_dp->last_power_on = jiffies;
2048
2049 if (IS_GEN5(dev)) {
2050 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2051 I915_WRITE(pp_ctrl_reg, pp);
2052 POSTING_READ(pp_ctrl_reg);
2053 }
2054 }
2055
2056 void intel_edp_panel_on(struct intel_dp *intel_dp)
2057 {
2058 if (!is_edp(intel_dp))
2059 return;
2060
2061 pps_lock(intel_dp);
2062 edp_panel_on(intel_dp);
2063 pps_unlock(intel_dp);
2064 }
2065
2066
2067 static void edp_panel_off(struct intel_dp *intel_dp)
2068 {
2069 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2070 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2071 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2072 struct drm_i915_private *dev_priv = dev->dev_private;
2073 enum intel_display_power_domain power_domain;
2074 u32 pp;
2075 i915_reg_t pp_ctrl_reg;
2076
2077 lockdep_assert_held(&dev_priv->pps_mutex);
2078
2079 if (!is_edp(intel_dp))
2080 return;
2081
2082 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2083 port_name(dp_to_dig_port(intel_dp)->port));
2084
2085 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2086 port_name(dp_to_dig_port(intel_dp)->port));
2087
2088 pp = ironlake_get_pp_control(intel_dp);
2089 /* We need to switch off panel power _and_ the forced VDD; otherwise
2090 * some panels get very unhappy and cease to work. */
2091 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2092 EDP_BLC_ENABLE);
2093
2094 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2095
2096 intel_dp->want_panel_vdd = false;
2097
2098 I915_WRITE(pp_ctrl_reg, pp);
2099 POSTING_READ(pp_ctrl_reg);
2100
2101 intel_dp->last_power_cycle = jiffies;
2102 wait_panel_off(intel_dp);
2103
2104 /* We got a reference when we enabled the VDD. */
2105 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2106 intel_display_power_put(dev_priv, power_domain);
2107 }
2108
2109 void intel_edp_panel_off(struct intel_dp *intel_dp)
2110 {
2111 if (!is_edp(intel_dp))
2112 return;
2113
2114 pps_lock(intel_dp);
2115 edp_panel_off(intel_dp);
2116 pps_unlock(intel_dp);
2117 }
2118
2119 /* Enable backlight in the panel power control. */
2120 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2121 {
2122 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2123 struct drm_device *dev = intel_dig_port->base.base.dev;
2124 struct drm_i915_private *dev_priv = dev->dev_private;
2125 u32 pp;
2126 i915_reg_t pp_ctrl_reg;
2127
2128 /*
2129 * If we enable the backlight right away following a panel power
2130 * on, we may see slight flicker as the panel syncs with the eDP
2131 * link. So delay a bit to make sure the image is solid before
2132 * allowing it to appear.
2133 */
2134 wait_backlight_on(intel_dp);
2135
2136 pps_lock(intel_dp);
2137
2138 pp = ironlake_get_pp_control(intel_dp);
2139 pp |= EDP_BLC_ENABLE;
2140
2141 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2142
2143 I915_WRITE(pp_ctrl_reg, pp);
2144 POSTING_READ(pp_ctrl_reg);
2145
2146 pps_unlock(intel_dp);
2147 }
2148
2149 /* Enable backlight PWM and backlight PP control. */
2150 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2151 {
2152 if (!is_edp(intel_dp))
2153 return;
2154
2155 DRM_DEBUG_KMS("\n");
2156
2157 intel_panel_enable_backlight(intel_dp->attached_connector);
2158 _intel_edp_backlight_on(intel_dp);
2159 }
2160
2161 /* Disable backlight in the panel power control. */
2162 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2163 {
2164 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u32 pp;
2167 i915_reg_t pp_ctrl_reg;
2168
2169 if (!is_edp(intel_dp))
2170 return;
2171
2172 pps_lock(intel_dp);
2173
2174 pp = ironlake_get_pp_control(intel_dp);
2175 pp &= ~EDP_BLC_ENABLE;
2176
2177 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2178
2179 I915_WRITE(pp_ctrl_reg, pp);
2180 POSTING_READ(pp_ctrl_reg);
2181
2182 pps_unlock(intel_dp);
2183
2184 intel_dp->last_backlight_off = jiffies;
2185 edp_wait_backlight_off(intel_dp);
2186 }
2187
2188 /* Disable backlight PP control and backlight PWM. */
2189 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2190 {
2191 if (!is_edp(intel_dp))
2192 return;
2193
2194 DRM_DEBUG_KMS("\n");
2195
2196 _intel_edp_backlight_off(intel_dp);
2197 intel_panel_disable_backlight(intel_dp->attached_connector);
2198 }
2199
2200 /*
2201 * Hook for controlling the panel power control backlight through the bl_power
2202 * sysfs attribute. Take care to handle multiple calls.
2203 */
2204 static void intel_edp_backlight_power(struct intel_connector *connector,
2205 bool enable)
2206 {
2207 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2208 bool is_enabled;
2209
2210 pps_lock(intel_dp);
2211 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2212 pps_unlock(intel_dp);
2213
2214 if (is_enabled == enable)
2215 return;
2216
2217 DRM_DEBUG_KMS("panel power control backlight %s\n",
2218 enable ? "enable" : "disable");
2219
2220 if (enable)
2221 _intel_edp_backlight_on(intel_dp);
2222 else
2223 _intel_edp_backlight_off(intel_dp);
2224 }
2225
2226 static const char *state_string(bool enabled)
2227 {
2228 return enabled ? "on" : "off";
2229 }
2230
2231 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2232 {
2233 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2234 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2235 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2236
2237 I915_STATE_WARN(cur_state != state,
2238 "DP port %c state assertion failure (expected %s, current %s)\n",
2239 port_name(dig_port->port),
2240 state_string(state), state_string(cur_state));
2241 }
2242 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2243
2244 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2245 {
2246 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2247
2248 I915_STATE_WARN(cur_state != state,
2249 "eDP PLL state assertion failure (expected %s, current %s)\n",
2250 state_string(state), state_string(cur_state));
2251 }
2252 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2253 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2254
2255 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2256 {
2257 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2258 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2259 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2260
2261 assert_pipe_disabled(dev_priv, crtc->pipe);
2262 assert_dp_port_disabled(intel_dp);
2263 assert_edp_pll_disabled(dev_priv);
2264
2265 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2266 crtc->config->port_clock);
2267
2268 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2269
2270 if (crtc->config->port_clock == 162000)
2271 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2272 else
2273 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2274
2275 I915_WRITE(DP_A, intel_dp->DP);
2276 POSTING_READ(DP_A);
2277 udelay(500);
2278
2279 intel_dp->DP |= DP_PLL_ENABLE;
2280
2281 I915_WRITE(DP_A, intel_dp->DP);
2282 POSTING_READ(DP_A);
2283 udelay(200);
2284 }
2285
2286 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2287 {
2288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2289 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2290 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2291
2292 assert_pipe_disabled(dev_priv, crtc->pipe);
2293 assert_dp_port_disabled(intel_dp);
2294 assert_edp_pll_enabled(dev_priv);
2295
2296 DRM_DEBUG_KMS("disabling eDP PLL\n");
2297
2298 intel_dp->DP &= ~DP_PLL_ENABLE;
2299
2300 I915_WRITE(DP_A, intel_dp->DP);
2301 POSTING_READ(DP_A);
2302 udelay(200);
2303 }
2304
2305 /* If the sink supports it, try to set the power state appropriately */
2306 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2307 {
2308 int ret, i;
2309
2310 /* Should have a valid DPCD by this point */
2311 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2312 return;
2313
2314 if (mode != DRM_MODE_DPMS_ON) {
2315 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2316 DP_SET_POWER_D3);
2317 } else {
2318 /*
2319 * When turning on, we need to retry a few times, with a 1 ms
2320 * delay between attempts, to give the sink time to wake up.
2321 */
2322 for (i = 0; i < 3; i++) {
2323 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2324 DP_SET_POWER_D0);
2325 if (ret == 1)
2326 break;
2327 msleep(1);
2328 }
2329 }
2330
2331 if (ret != 1)
2332 DRM_DEBUG_KMS("failed to %s sink power state\n",
2333 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2334 }
2335
2336 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2337 enum pipe *pipe)
2338 {
2339 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2340 enum port port = dp_to_dig_port(intel_dp)->port;
2341 struct drm_device *dev = encoder->base.dev;
2342 struct drm_i915_private *dev_priv = dev->dev_private;
2343 enum intel_display_power_domain power_domain;
2344 u32 tmp;
2345
2346 power_domain = intel_display_port_power_domain(encoder);
2347 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2348 return false;
2349
2350 tmp = I915_READ(intel_dp->output_reg);
2351
2352 if (!(tmp & DP_PORT_EN))
2353 return false;
2354
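/* Decode which pipe drives this port; the bit encoding differs per platform. */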
2355 if (IS_GEN7(dev) && port == PORT_A) {
2356 *pipe = PORT_TO_PIPE_CPT(tmp);
2357 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2358 enum pipe p;
2359
2360 for_each_pipe(dev_priv, p) {
2361 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2362 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2363 *pipe = p;
2364 return true;
2365 }
2366 }
2367
2368 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2369 i915_mmio_reg_offset(intel_dp->output_reg));
2370 } else if (IS_CHERRYVIEW(dev)) {
2371 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2372 } else {
2373 *pipe = PORT_TO_PIPE(tmp);
2374 }
2375
2376 return true;
2377 }
2378
2379 static void intel_dp_get_config(struct intel_encoder *encoder,
2380 struct intel_crtc_state *pipe_config)
2381 {
2382 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2383 u32 tmp, flags = 0;
2384 struct drm_device *dev = encoder->base.dev;
2385 struct drm_i915_private *dev_priv = dev->dev_private;
2386 enum port port = dp_to_dig_port(intel_dp)->port;
2387 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2388 int dotclock;
2389
2390 tmp = I915_READ(intel_dp->output_reg);
2391
2392 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2393
2394 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2395 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2396
2397 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2398 flags |= DRM_MODE_FLAG_PHSYNC;
2399 else
2400 flags |= DRM_MODE_FLAG_NHSYNC;
2401
2402 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2403 flags |= DRM_MODE_FLAG_PVSYNC;
2404 else
2405 flags |= DRM_MODE_FLAG_NVSYNC;
2406 } else {
2407 if (tmp & DP_SYNC_HS_HIGH)
2408 flags |= DRM_MODE_FLAG_PHSYNC;
2409 else
2410 flags |= DRM_MODE_FLAG_NHSYNC;
2411
2412 if (tmp & DP_SYNC_VS_HIGH)
2413 flags |= DRM_MODE_FLAG_PVSYNC;
2414 else
2415 flags |= DRM_MODE_FLAG_NVSYNC;
2416 }
2417
2418 pipe_config->base.adjusted_mode.flags |= flags;
2419
2420 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2421 tmp & DP_COLOR_RANGE_16_235)
2422 pipe_config->limited_color_range = true;
2423
2424 pipe_config->has_dp_encoder = true;
2425
2426 pipe_config->lane_count =
2427 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2428
2429 intel_dp_get_m_n(crtc, pipe_config);
2430
2431 if (port == PORT_A) {
2432 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2433 pipe_config->port_clock = 162000;
2434 else
2435 pipe_config->port_clock = 270000;
2436 }
2437
2438 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2439 &pipe_config->dp_m_n);
2440
2441 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2442 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2443
2444 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2445
2446 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2447 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2448 /*
2449 * This is a big fat ugly hack.
2450 *
2451 * Some machines in UEFI boot mode provide us a VBT that has 18
2452 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2453 * unknown we fail to light up. Yet the same BIOS boots up with
2454 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2455 * max, not what it tells us to use.
2456 *
2457 * Note: This will still be broken if the eDP panel is not lit
2458 * up by the BIOS, and thus we can't get the mode at module
2459 * load.
2460 */
2461 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2462 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2463 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2464 }
2465 }
2466
2467 static void intel_disable_dp(struct intel_encoder *encoder)
2468 {
2469 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2470 struct drm_device *dev = encoder->base.dev;
2471 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2472
2473 if (crtc->config->has_audio)
2474 intel_audio_codec_disable(encoder);
2475
2476 if (HAS_PSR(dev) && !HAS_DDI(dev))
2477 intel_psr_disable(intel_dp);
2478
2479 /* Make sure the panel is off before trying to change the mode. But also
2480 * ensure that we have vdd while we switch off the panel. */
2481 intel_edp_panel_vdd_on(intel_dp);
2482 intel_edp_backlight_off(intel_dp);
2483 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2484 intel_edp_panel_off(intel_dp);
2485
2486 /* disable the port before the pipe on g4x */
2487 if (INTEL_INFO(dev)->gen < 5)
2488 intel_dp_link_down(intel_dp);
2489 }
2490
2491 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2492 {
2493 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2494 enum port port = dp_to_dig_port(intel_dp)->port;
2495
2496 intel_dp_link_down(intel_dp);
2497
2498 /* Only ilk+ has port A */
2499 if (port == PORT_A)
2500 ironlake_edp_pll_off(intel_dp);
2501 }
2502
2503 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2504 {
2505 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2506
2507 intel_dp_link_down(intel_dp);
2508 }
2509
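/*
 * Assert (reset == true) or deassert the soft reset on the data lanes of
 * this channel; used around port enable/disable on CHV.
 */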
2510 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2511 bool reset)
2512 {
2513 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2514 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2515 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2516 enum pipe pipe = crtc->pipe;
2517 uint32_t val;
2518
2519 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2520 if (reset)
2521 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2522 else
2523 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2524 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2525
2526 if (crtc->config->lane_count > 2) {
2527 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2528 if (reset)
2529 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2530 else
2531 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2532 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2533 }
2534
2535 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2536 val |= CHV_PCS_REQ_SOFTRESET_EN;
2537 if (reset)
2538 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2539 else
2540 val |= DPIO_PCS_CLK_SOFT_RESET;
2541 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2542
2543 if (crtc->config->lane_count > 2) {
2544 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2545 val |= CHV_PCS_REQ_SOFTRESET_EN;
2546 if (reset)
2547 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2548 else
2549 val |= DPIO_PCS_CLK_SOFT_RESET;
2550 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2551 }
2552 }
2553
2554 static void chv_post_disable_dp(struct intel_encoder *encoder)
2555 {
2556 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2557 struct drm_device *dev = encoder->base.dev;
2558 struct drm_i915_private *dev_priv = dev->dev_private;
2559
2560 intel_dp_link_down(intel_dp);
2561
2562 mutex_lock(&dev_priv->sb_lock);
2563
2564 /* Assert data lane reset */
2565 chv_data_lane_soft_reset(encoder, true);
2566
2567 mutex_unlock(&dev_priv->sb_lock);
2568 }
2569
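/*
 * Encode the requested link training pattern: on DDI platforms this writes
 * DP_TP_CTL directly, elsewhere it only updates *DP and the caller writes
 * the port register.
 */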
2570 static void
2571 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2572 uint32_t *DP,
2573 uint8_t dp_train_pat)
2574 {
2575 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2576 struct drm_device *dev = intel_dig_port->base.base.dev;
2577 struct drm_i915_private *dev_priv = dev->dev_private;
2578 enum port port = intel_dig_port->port;
2579
2580 if (HAS_DDI(dev)) {
2581 uint32_t temp = I915_READ(DP_TP_CTL(port));
2582
2583 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2584 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2585 else
2586 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2587
2588 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2589 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2590 case DP_TRAINING_PATTERN_DISABLE:
2591 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2592
2593 break;
2594 case DP_TRAINING_PATTERN_1:
2595 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2596 break;
2597 case DP_TRAINING_PATTERN_2:
2598 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2599 break;
2600 case DP_TRAINING_PATTERN_3:
2601 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2602 break;
2603 }
2604 I915_WRITE(DP_TP_CTL(port), temp);
2605
2606 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2607 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2608 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2609
2610 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2611 case DP_TRAINING_PATTERN_DISABLE:
2612 *DP |= DP_LINK_TRAIN_OFF_CPT;
2613 break;
2614 case DP_TRAINING_PATTERN_1:
2615 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2616 break;
2617 case DP_TRAINING_PATTERN_2:
2618 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2619 break;
2620 case DP_TRAINING_PATTERN_3:
2621 DRM_ERROR("DP training pattern 3 not supported\n");
2622 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2623 break;
2624 }
2625
2626 } else {
2627 if (IS_CHERRYVIEW(dev))
2628 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2629 else
2630 *DP &= ~DP_LINK_TRAIN_MASK;
2631
2632 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2633 case DP_TRAINING_PATTERN_DISABLE:
2634 *DP |= DP_LINK_TRAIN_OFF;
2635 break;
2636 case DP_TRAINING_PATTERN_1:
2637 *DP |= DP_LINK_TRAIN_PAT_1;
2638 break;
2639 case DP_TRAINING_PATTERN_2:
2640 *DP |= DP_LINK_TRAIN_PAT_2;
2641 break;
2642 case DP_TRAINING_PATTERN_3:
2643 if (IS_CHERRYVIEW(dev)) {
2644 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2645 } else {
2646 DRM_ERROR("DP training pattern 3 not supported\n");
2647 *DP |= DP_LINK_TRAIN_PAT_2;
2648 }
2649 break;
2650 }
2651 }
2652 }
2653
2654 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2655 {
2656 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2657 struct drm_i915_private *dev_priv = dev->dev_private;
2658 struct intel_crtc *crtc =
2659 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2660
2661 /* enable with pattern 1 (as per spec) */
2662 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2663 DP_TRAINING_PATTERN_1);
2664
2665 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2666 POSTING_READ(intel_dp->output_reg);
2667
2668 /*
2669 * Magic for VLV/CHV. We _must_ first set up the register
2670 * without actually enabling the port, and then do another
2671 * write to enable the port. Otherwise link training will
2672 * fail when the power sequencer is freshly used for this port.
2673 */
2674 intel_dp->DP |= DP_PORT_EN;
2675 if (crtc->config->has_audio)
2676 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2677
2678 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2679 POSTING_READ(intel_dp->output_reg);
2680 }
2681
2682 static void intel_enable_dp(struct intel_encoder *encoder)
2683 {
2684 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2685 struct drm_device *dev = encoder->base.dev;
2686 struct drm_i915_private *dev_priv = dev->dev_private;
2687 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2688 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2689 enum port port = dp_to_dig_port(intel_dp)->port;
2690 enum pipe pipe = crtc->pipe;
2691
2692 if (WARN_ON(dp_reg & DP_PORT_EN))
2693 return;
2694
2695 pps_lock(intel_dp);
2696
2697 if (IS_VALLEYVIEW(dev))
2698 vlv_init_panel_power_sequencer(intel_dp);
2699
2700 intel_dp_enable_port(intel_dp);
2701
2702 if (port == PORT_A && IS_GEN5(dev_priv)) {
2703 /*
2704 * Underrun reporting for the other pipe was disabled in
2705 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2706 * enabled, so it's now safe to re-enable underrun reporting.
2707 */
2708 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2709 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2710 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2711 }
2712
2713 edp_panel_vdd_on(intel_dp);
2714 edp_panel_on(intel_dp);
2715 edp_panel_vdd_off(intel_dp, true);
2716
2717 pps_unlock(intel_dp);
2718
2719 if (IS_VALLEYVIEW(dev)) {
2720 unsigned int lane_mask = 0x0;
2721
2722 if (IS_CHERRYVIEW(dev))
2723 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2724
2725 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2726 lane_mask);
2727 }
2728
2729 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2730 intel_dp_start_link_train(intel_dp);
2731 intel_dp_stop_link_train(intel_dp);
2732
2733 if (crtc->config->has_audio) {
2734 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2735 pipe_name(pipe));
2736 intel_audio_codec_enable(encoder);
2737 }
2738 }
2739
2740 static void g4x_enable_dp(struct intel_encoder *encoder)
2741 {
2742 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2743
2744 intel_enable_dp(encoder);
2745 intel_edp_backlight_on(intel_dp);
2746 }
2747
2748 static void vlv_enable_dp(struct intel_encoder *encoder)
2749 {
2750 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2751
2752 intel_edp_backlight_on(intel_dp);
2753 intel_psr_enable(intel_dp);
2754 }
2755
2756 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2757 {
2758 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2759 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2760 enum port port = dp_to_dig_port(intel_dp)->port;
2761 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2762
2763 intel_dp_prepare(encoder);
2764
2765 if (port == PORT_A && IS_GEN5(dev_priv)) {
2766 /*
2767 * We get FIFO underruns on the other pipe when
2768 * enabling the CPU eDP PLL, and when enabling the CPU
2769 * eDP port. We could potentially avoid the PLL
2770 * underrun with a vblank wait just prior to enabling
2771 * the PLL, but that doesn't appear to help the port
2772 * enable case. Just sweep it all under the rug.
2773 */
2774 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2775 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2776 }
2777
2778 /* Only ilk+ has port A */
2779 if (port == PORT_A)
2780 ironlake_edp_pll_on(intel_dp);
2781 }
2782
2783 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2784 {
2785 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2786 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2787 enum pipe pipe = intel_dp->pps_pipe;
2788 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2789
2790 edp_panel_vdd_off_sync(intel_dp);
2791
2792 /*
2793 * VLV seems to get confused when multiple power sequencers
2794 * have the same port selected (even if only one has power/vdd
2795 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2796 * CHV, on the other hand, doesn't seem to mind having the same port
2797 * selected in multiple power sequencers, but let's always clear the
2798 * port select when logically disconnecting a power sequencer
2799 * from a port.
2800 */
2801 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2802 pipe_name(pipe), port_name(intel_dig_port->port));
2803 I915_WRITE(pp_on_reg, 0);
2804 POSTING_READ(pp_on_reg);
2805
2806 intel_dp->pps_pipe = INVALID_PIPE;
2807 }
2808
2809 static void vlv_steal_power_sequencer(struct drm_device *dev,
2810 enum pipe pipe)
2811 {
2812 struct drm_i915_private *dev_priv = dev->dev_private;
2813 struct intel_encoder *encoder;
2814
2815 lockdep_assert_held(&dev_priv->pps_mutex);
2816
2817 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2818 return;
2819
2820 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2821 base.head) {
2822 struct intel_dp *intel_dp;
2823 enum port port;
2824
2825 if (encoder->type != INTEL_OUTPUT_EDP)
2826 continue;
2827
2828 intel_dp = enc_to_intel_dp(&encoder->base);
2829 port = dp_to_dig_port(intel_dp)->port;
2830
2831 if (intel_dp->pps_pipe != pipe)
2832 continue;
2833
2834 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2835 pipe_name(pipe), port_name(port));
2836
2837 WARN(encoder->base.crtc,
2838 "stealing pipe %c power sequencer from active eDP port %c\n",
2839 pipe_name(pipe), port_name(port));
2840
2841 /* make sure vdd is off before we steal it */
2842 vlv_detach_power_sequencer(intel_dp);
2843 }
2844 }
2845
2846 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2847 {
2848 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2849 struct intel_encoder *encoder = &intel_dig_port->base;
2850 struct drm_device *dev = encoder->base.dev;
2851 struct drm_i915_private *dev_priv = dev->dev_private;
2852 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2853
2854 lockdep_assert_held(&dev_priv->pps_mutex);
2855
2856 if (!is_edp(intel_dp))
2857 return;
2858
2859 if (intel_dp->pps_pipe == crtc->pipe)
2860 return;
2861
2862 /*
2863 * If another power sequencer was being used on this
2864 * port previously make sure to turn off vdd there while
2865 * we still have control of it.
2866 */
2867 if (intel_dp->pps_pipe != INVALID_PIPE)
2868 vlv_detach_power_sequencer(intel_dp);
2869
2870 /*
2871 * We may be stealing the power
2872 * sequencer from another port.
2873 */
2874 vlv_steal_power_sequencer(dev, crtc->pipe);
2875
2876 /* now it's all ours */
2877 intel_dp->pps_pipe = crtc->pipe;
2878
2879 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2880 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2881
2882 /* init power sequencer on this pipe and port */
2883 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2884 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2885 }
2886
2887 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2888 {
2889 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2890 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2891 struct drm_device *dev = encoder->base.dev;
2892 struct drm_i915_private *dev_priv = dev->dev_private;
2893 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2894 enum dpio_channel port = vlv_dport_to_channel(dport);
2895 int pipe = intel_crtc->pipe;
2896 u32 val;
2897
2898 mutex_lock(&dev_priv->sb_lock);
2899
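/* Note: the DW8 value read here is discarded; the register is rebuilt
 * from scratch with the magic constants below. */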
2900 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2901 val = 0;
2902 if (pipe)
2903 val |= (1<<21);
2904 else
2905 val &= ~(1<<21);
2906 val |= 0x001000c4;
2907 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2908 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2909 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2910
2911 mutex_unlock(&dev_priv->sb_lock);
2912
2913 intel_enable_dp(encoder);
2914 }
2915
2916 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2917 {
2918 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2919 struct drm_device *dev = encoder->base.dev;
2920 struct drm_i915_private *dev_priv = dev->dev_private;
2921 struct intel_crtc *intel_crtc =
2922 to_intel_crtc(encoder->base.crtc);
2923 enum dpio_channel port = vlv_dport_to_channel(dport);
2924 int pipe = intel_crtc->pipe;
2925
2926 intel_dp_prepare(encoder);
2927
2928 /* Program Tx lane resets to default */
2929 mutex_lock(&dev_priv->sb_lock);
2930 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2931 DPIO_PCS_TX_LANE2_RESET |
2932 DPIO_PCS_TX_LANE1_RESET);
2933 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2934 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2935 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2936 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2937 DPIO_PCS_CLK_SOFT_RESET);
2938
2939 /* Fix up inter-pair skew failure */
2940 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2941 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2942 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2943 mutex_unlock(&dev_priv->sb_lock);
2944 }
2945
2946 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2947 {
2948 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2949 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2950 struct drm_device *dev = encoder->base.dev;
2951 struct drm_i915_private *dev_priv = dev->dev_private;
2952 struct intel_crtc *intel_crtc =
2953 to_intel_crtc(encoder->base.crtc);
2954 enum dpio_channel ch = vlv_dport_to_channel(dport);
2955 int pipe = intel_crtc->pipe;
2956 int data, i, stagger;
2957 u32 val;
2958
2959 mutex_lock(&dev_priv->sb_lock);
2960
2961 /* allow hardware to manage TX FIFO reset source */
2962 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2963 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2964 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2965
2966 if (intel_crtc->config->lane_count > 2) {
2967 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2968 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2969 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2970 }
2971
2972 /* Program Tx lane latency optimal setting */
2973 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2974 /* Set the upar bit */
2975 if (intel_crtc->config->lane_count == 1)
2976 data = 0x0;
2977 else
2978 data = (i == 1) ? 0x0 : 0x1;
2979 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2980 data << DPIO_UPAR_SHIFT);
2981 }
2982
2983 /* Data lane stagger programming */
2984 if (intel_crtc->config->port_clock > 270000)
2985 stagger = 0x18;
2986 else if (intel_crtc->config->port_clock > 135000)
2987 stagger = 0xd;
2988 else if (intel_crtc->config->port_clock > 67500)
2989 stagger = 0x7;
2990 else if (intel_crtc->config->port_clock > 33750)
2991 stagger = 0x4;
2992 else
2993 stagger = 0x2;
2994
2995 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2996 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2997 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2998
2999 if (intel_crtc->config->lane_count > 2) {
3000 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3001 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3002 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3003 }
3004
3005 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3006 DPIO_LANESTAGGER_STRAP(stagger) |
3007 DPIO_LANESTAGGER_STRAP_OVRD |
3008 DPIO_TX1_STAGGER_MASK(0x1f) |
3009 DPIO_TX1_STAGGER_MULT(6) |
3010 DPIO_TX2_STAGGER_MULT(0));
3011
3012 if (intel_crtc->config->lane_count > 2) {
3013 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3014 DPIO_LANESTAGGER_STRAP(stagger) |
3015 DPIO_LANESTAGGER_STRAP_OVRD |
3016 DPIO_TX1_STAGGER_MASK(0x1f) |
3017 DPIO_TX1_STAGGER_MULT(7) |
3018 DPIO_TX2_STAGGER_MULT(5));
3019 }
3020
3021 /* Deassert data lane reset */
3022 chv_data_lane_soft_reset(encoder, false);
3023
3024 mutex_unlock(&dev_priv->sb_lock);
3025
3026 intel_enable_dp(encoder);
3027
3028 /* Second common lane will stay alive on its own now */
3029 if (dport->release_cl2_override) {
3030 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3031 dport->release_cl2_override = false;
3032 }
3033 }
3034
3035 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3036 {
3037 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3038 struct drm_device *dev = encoder->base.dev;
3039 struct drm_i915_private *dev_priv = dev->dev_private;
3040 struct intel_crtc *intel_crtc =
3041 to_intel_crtc(encoder->base.crtc);
3042 enum dpio_channel ch = vlv_dport_to_channel(dport);
3043 enum pipe pipe = intel_crtc->pipe;
3044 unsigned int lane_mask =
3045 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3046 u32 val;
3047
3048 intel_dp_prepare(encoder);
3049
3050 /*
3051 * Must trick the second common lane into life.
3052 * Otherwise we can't even access the PLL.
3053 */
3054 if (ch == DPIO_CH0 && pipe == PIPE_B)
3055 dport->release_cl2_override =
3056 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3057
3058 chv_phy_powergate_lanes(encoder, true, lane_mask);
3059
3060 mutex_lock(&dev_priv->sb_lock);
3061
3062 /* Assert data lane reset */
3063 chv_data_lane_soft_reset(encoder, true);
3064
3065 /* program left/right clock distribution */
3066 if (pipe != PIPE_B) {
3067 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3068 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3069 if (ch == DPIO_CH0)
3070 val |= CHV_BUFLEFTENA1_FORCE;
3071 if (ch == DPIO_CH1)
3072 val |= CHV_BUFRIGHTENA1_FORCE;
3073 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3074 } else {
3075 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3076 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3077 if (ch == DPIO_CH0)
3078 val |= CHV_BUFLEFTENA2_FORCE;
3079 if (ch == DPIO_CH1)
3080 val |= CHV_BUFRIGHTENA2_FORCE;
3081 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3082 }
3083
3084 /* program clock channel usage */
3085 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3086 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3087 if (pipe != PIPE_B)
3088 val &= ~CHV_PCS_USEDCLKCHANNEL;
3089 else
3090 val |= CHV_PCS_USEDCLKCHANNEL;
3091 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3092
3093 if (intel_crtc->config->lane_count > 2) {
3094 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3095 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3096 if (pipe != PIPE_B)
3097 val &= ~CHV_PCS_USEDCLKCHANNEL;
3098 else
3099 val |= CHV_PCS_USEDCLKCHANNEL;
3100 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3101 }
3102
3103 /*
3104 * This is a bit weird since generally CL
3105 * matches the pipe, but here we need to
3106 * pick the CL based on the port.
3107 */
3108 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3109 if (pipe != PIPE_B)
3110 val &= ~CHV_CMN_USEDCLKCHANNEL;
3111 else
3112 val |= CHV_CMN_USEDCLKCHANNEL;
3113 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3114
3115 mutex_unlock(&dev_priv->sb_lock);
3116 }
3117
3118 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3119 {
3120 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3121 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3122 u32 val;
3123
3124 mutex_lock(&dev_priv->sb_lock);
3125
3126 /* disable left/right clock distribution */
3127 if (pipe != PIPE_B) {
3128 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3129 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3130 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3131 } else {
3132 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3133 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3134 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3135 }
3136
3137 mutex_unlock(&dev_priv->sb_lock);
3138
3139 /*
3140 * Leave the power down bit cleared for at least one
3141 * lane so that chv_powergate_phy_ch() will power
3142 * on something when the channel is otherwise unused.
3143 * When the port is off and the override is removed
3144 * the lanes power down anyway, so otherwise it doesn't
3145 * really matter what the state of power down bits is
3146 * after this.
3147 */
3148 chv_phy_powergate_lanes(encoder, false, 0x0);
3149 }
3150
3151 /*
3152 * Native read with retry for link status and receiver capability reads for
3153 * cases where the sink may still be asleep.
3154 *
3155 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3156 * supposed to retry 3 times per the spec.
3157 */
3158 static ssize_t
3159 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3160 void *buffer, size_t size)
3161 {
3162 ssize_t ret;
3163 int i;
3164
3165 /*
3166 * Sometimes we just get the same incorrect byte repeated
3167 * over the entire buffer. Doing just one throw away read
3168 * initially seems to "solve" it.
3169 */
3170 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3171
3172 for (i = 0; i < 3; i++) {
3173 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3174 if (ret == size)
3175 return ret;
3176 msleep(1);
3177 }
3178
3179 return ret;
3180 }
3181
3182 /*
3183 * Fetch AUX CH registers 0x202 - 0x207 which contain
3184 * link status information
3185 */
3186 bool
3187 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3188 {
3189 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3190 DP_LANE0_1_STATUS,
3191 link_status,
3192 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3193 }
3194
3195 /* These are source-specific values (the max levels the source port can drive). */
3196 uint8_t
3197 intel_dp_voltage_max(struct intel_dp *intel_dp)
3198 {
3199 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3200 struct drm_i915_private *dev_priv = dev->dev_private;
3201 enum port port = dp_to_dig_port(intel_dp)->port;
3202
3203 if (IS_BROXTON(dev))
3204 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3205 else if (INTEL_INFO(dev)->gen >= 9) {
3206 if (dev_priv->edp_low_vswing && port == PORT_A)
3207 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3208 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3209 } else if (IS_VALLEYVIEW(dev))
3210 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3211 else if (IS_GEN7(dev) && port == PORT_A)
3212 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3213 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3214 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3215 else
3216 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3217 }
3218
3219 uint8_t
3220 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3221 {
3222 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3223 enum port port = dp_to_dig_port(intel_dp)->port;
3224
3225 if (INTEL_INFO(dev)->gen >= 9) {
3226 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3227 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3228 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3229 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3230 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3231 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3232 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3234 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3235 default:
3236 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3237 }
3238 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3239 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3241 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3242 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3243 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3244 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3245 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3247 default:
3248 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3249 }
3250 } else if (IS_VALLEYVIEW(dev)) {
3251 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3253 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3255 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3257 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3258 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3259 default:
3260 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3261 }
3262 } else if (IS_GEN7(dev) && port == PORT_A) {
3263 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3267 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3268 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3269 default:
3270 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3271 }
3272 } else {
3273 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3275 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3277 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3279 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3281 default:
3282 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3283 }
3284 }
3285 }
3286
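/*
 * Translate train_set[0] into VLV DPIO PHY register values and program
 * them. Returns 0: on VLV the levels live entirely in the PHY, so no
 * bits need to change in the DP port register itself.
 */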
3287 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3288 {
3289 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3290 struct drm_i915_private *dev_priv = dev->dev_private;
3291 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3292 struct intel_crtc *intel_crtc =
3293 to_intel_crtc(dport->base.base.crtc);
3294 unsigned long demph_reg_value, preemph_reg_value,
3295 uniqtranscale_reg_value;
3296 uint8_t train_set = intel_dp->train_set[0];
3297 enum dpio_channel port = vlv_dport_to_channel(dport);
3298 int pipe = intel_crtc->pipe;
3299
3300 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3301 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3302 preemph_reg_value = 0x0004000;
3303 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3305 demph_reg_value = 0x2B405555;
3306 uniqtranscale_reg_value = 0x552AB83A;
3307 break;
3308 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3309 demph_reg_value = 0x2B404040;
3310 uniqtranscale_reg_value = 0x5548B83A;
3311 break;
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3313 demph_reg_value = 0x2B245555;
3314 uniqtranscale_reg_value = 0x5560B83A;
3315 break;
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3317 demph_reg_value = 0x2B405555;
3318 uniqtranscale_reg_value = 0x5598DA3A;
3319 break;
3320 default:
3321 return 0;
3322 }
3323 break;
3324 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3325 preemph_reg_value = 0x0002000;
3326 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3327 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3328 demph_reg_value = 0x2B404040;
3329 uniqtranscale_reg_value = 0x5552B83A;
3330 break;
3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3332 demph_reg_value = 0x2B404848;
3333 uniqtranscale_reg_value = 0x5580B83A;
3334 break;
3335 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3336 demph_reg_value = 0x2B404040;
3337 uniqtranscale_reg_value = 0x55ADDA3A;
3338 break;
3339 default:
3340 return 0;
3341 }
3342 break;
3343 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3344 preemph_reg_value = 0x0000000;
3345 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3347 demph_reg_value = 0x2B305555;
3348 uniqtranscale_reg_value = 0x5570B83A;
3349 break;
3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3351 demph_reg_value = 0x2B2B4040;
3352 uniqtranscale_reg_value = 0x55ADDA3A;
3353 break;
3354 default:
3355 return 0;
3356 }
3357 break;
3358 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3359 preemph_reg_value = 0x0006000;
3360 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3361 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3362 demph_reg_value = 0x1B405555;
3363 uniqtranscale_reg_value = 0x55ADDA3A;
3364 break;
3365 default:
3366 return 0;
3367 }
3368 break;
3369 default:
3370 return 0;
3371 }
3372
3373 mutex_lock(&dev_priv->sb_lock);
3374 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3375 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3376 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3377 uniqtranscale_reg_value);
3378 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3379 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3380 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3381 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3382 mutex_unlock(&dev_priv->sb_lock);
3383
3384 return 0;
3385 }
3386
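/* The unique transition scale is needed only for max vswing with no pre-emphasis. */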
3387 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3388 {
3389 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3390 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3391 }
3392
3393 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3394 {
3395 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3396 struct drm_i915_private *dev_priv = dev->dev_private;
3397 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3398 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3399 u32 deemph_reg_value, margin_reg_value, val;
3400 uint8_t train_set = intel_dp->train_set[0];
3401 enum dpio_channel ch = vlv_dport_to_channel(dport);
3402 enum pipe pipe = intel_crtc->pipe;
3403 int i;
3404
3405 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3406 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3407 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3408 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3409 deemph_reg_value = 128;
3410 margin_reg_value = 52;
3411 break;
3412 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3413 deemph_reg_value = 128;
3414 margin_reg_value = 77;
3415 break;
3416 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3417 deemph_reg_value = 128;
3418 margin_reg_value = 102;
3419 break;
3420 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3421 deemph_reg_value = 128;
3422 margin_reg_value = 154;
3423 /* FIXME extra to set for 1200 */
3424 break;
3425 default:
3426 return 0;
3427 }
3428 break;
3429 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3430 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3432 deemph_reg_value = 85;
3433 margin_reg_value = 78;
3434 break;
3435 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3436 deemph_reg_value = 85;
3437 margin_reg_value = 116;
3438 break;
3439 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3440 deemph_reg_value = 85;
3441 margin_reg_value = 154;
3442 break;
3443 default:
3444 return 0;
3445 }
3446 break;
3447 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3448 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3449 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3450 deemph_reg_value = 64;
3451 margin_reg_value = 104;
3452 break;
3453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3454 deemph_reg_value = 64;
3455 margin_reg_value = 154;
3456 break;
3457 default:
3458 return 0;
3459 }
3460 break;
3461 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3462 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3464 deemph_reg_value = 43;
3465 margin_reg_value = 154;
3466 break;
3467 default:
3468 return 0;
3469 }
3470 break;
3471 default:
3472 return 0;
3473 }
3474
3475 mutex_lock(&dev_priv->sb_lock);
3476
3477 /* Clear calc init */
3478 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3479 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3480 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3481 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3482 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3483
3484 if (intel_crtc->config->lane_count > 2) {
3485 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3486 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3487 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3488 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3489 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3490 }
3491
3492 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3493 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3494 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3495 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3496
3497 if (intel_crtc->config->lane_count > 2) {
3498 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3499 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3500 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3501 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3502 }
3503
3504 /* Program swing deemph */
3505 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3506 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3507 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3508 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3509 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3510 }
3511
3512 /* Program swing margin */
3513 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3514 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3515
3516 val &= ~DPIO_SWING_MARGIN000_MASK;
3517 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3518
3519 /*
3520 * Supposedly this value shouldn't matter when unique transition
3521 * scale is disabled, but in fact it does matter. Let's just
3522 * always program the same value and hope it's OK.
3523 */
3524 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3525 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3526
3527 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3528 }
3529
3530 /*
3531 * The documentation says to set bit 27 for ch0 and bit 26
3532 * for ch1, which might be a typo in the doc.
3533 * For now, for this unique transition scale selection, set bit
3534 * 27 for both ch0 and ch1.
3535 */
3536 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3537 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3538 if (chv_need_uniq_trans_scale(train_set))
3539 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3540 else
3541 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3542 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3543 }
3544
3545 /* Start swing calculation */
3546 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3547 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3548 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3549
3550 if (intel_crtc->config->lane_count > 2) {
3551 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3552 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3553 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3554 }
3555
3556 mutex_unlock(&dev_priv->sb_lock);
3557
3558 return 0;
3559 }
3560
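/* Default (gen4+) DP voltage swing and pre-emphasis control */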
3561 static uint32_t
3562 gen4_signal_levels(uint8_t train_set)
3563 {
3564 uint32_t signal_levels = 0;
3565
3566 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3567 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3568 default:
3569 signal_levels |= DP_VOLTAGE_0_4;
3570 break;
3571 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3572 signal_levels |= DP_VOLTAGE_0_6;
3573 break;
3574 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3575 signal_levels |= DP_VOLTAGE_0_8;
3576 break;
3577 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3578 signal_levels |= DP_VOLTAGE_1_2;
3579 break;
3580 }
3581 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3582 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3583 default:
3584 signal_levels |= DP_PRE_EMPHASIS_0;
3585 break;
3586 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3587 signal_levels |= DP_PRE_EMPHASIS_3_5;
3588 break;
3589 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3590 signal_levels |= DP_PRE_EMPHASIS_6;
3591 break;
3592 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3593 signal_levels |= DP_PRE_EMPHASIS_9_5;
3594 break;
3595 }
3596 return signal_levels;
3597 }
3598
3599 /* Gen6's DP voltage swing and pre-emphasis control */
3600 static uint32_t
3601 gen6_edp_signal_levels(uint8_t train_set)
3602 {
3603 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3604 DP_TRAIN_PRE_EMPHASIS_MASK);
3605 switch (signal_levels) {
3606 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3607 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3608 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3609 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3610 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3611 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3613 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3614 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3616 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3617 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3618 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3619 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3620 default:
3621 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3622 "0x%x\n", signal_levels);
3623 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3624 }
3625 }
3626
3627 /* Gen7's DP voltage swing and pre-emphasis control */
3628 static uint32_t
3629 gen7_edp_signal_levels(uint8_t train_set)
3630 {
3631 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3632 DP_TRAIN_PRE_EMPHASIS_MASK);
3633 switch (signal_levels) {
3634 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3635 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3636 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3637 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3638 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3639 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3640
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3642 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3643 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3644 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3645
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3647 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3648 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3649 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3650
3651 default:
3652 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3653 "0x%x\n", signal_levels);
3654 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3655 }
3656 }
3657
3658 void
3659 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3660 {
3661 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3662 enum port port = intel_dig_port->port;
3663 struct drm_device *dev = intel_dig_port->base.base.dev;
3664 struct drm_i915_private *dev_priv = to_i915(dev);
3665 uint32_t signal_levels, mask = 0;
3666 uint8_t train_set = intel_dp->train_set[0];
3667
3668 if (HAS_DDI(dev)) {
3669 signal_levels = ddi_signal_levels(intel_dp);
3670
3671 if (IS_BROXTON(dev))
3672 signal_levels = 0;
3673 else
3674 mask = DDI_BUF_EMP_MASK;
3675 } else if (IS_CHERRYVIEW(dev)) {
3676 signal_levels = chv_signal_levels(intel_dp);
3677 } else if (IS_VALLEYVIEW(dev)) {
3678 signal_levels = vlv_signal_levels(intel_dp);
3679 } else if (IS_GEN7(dev) && port == PORT_A) {
3680 signal_levels = gen7_edp_signal_levels(train_set);
3681 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3682 } else if (IS_GEN6(dev) && port == PORT_A) {
3683 signal_levels = gen6_edp_signal_levels(train_set);
3684 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3685 } else {
3686 signal_levels = gen4_signal_levels(train_set);
3687 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3688 }
3689
3690 if (mask)
3691 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3692
3693 DRM_DEBUG_KMS("Using vswing level %d\n",
3694 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3695 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3696 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3697 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3698
3699 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3700
3701 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3702 POSTING_READ(intel_dp->output_reg);
3703 }
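/*
 * Note (editor's sketch): the platform hooks above only compute bits;
 * the common tail merges them into the port register shadow with
 *
 *	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
 *
 * On DDI platforms other than Broxton only DDI_BUF_EMP_MASK is
 * replaced; on Broxton both mask and signal_levels stay zero because
 * the levels are programmed through the DDI buffer translations in
 * ddi_signal_levels() rather than through this register.
 */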
3704
3705 void
3706 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3707 uint8_t dp_train_pat)
3708 {
3709 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3710 struct drm_i915_private *dev_priv =
3711 to_i915(intel_dig_port->base.base.dev);
3712
3713 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3714
3715 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3716 POSTING_READ(intel_dp->output_reg);
3717 }
3718
3719 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3720 {
3721 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3722 struct drm_device *dev = intel_dig_port->base.base.dev;
3723 struct drm_i915_private *dev_priv = dev->dev_private;
3724 enum port port = intel_dig_port->port;
3725 uint32_t val;
3726
3727 if (!HAS_DDI(dev))
3728 return;
3729
3730 val = I915_READ(DP_TP_CTL(port));
3731 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3732 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3733 I915_WRITE(DP_TP_CTL(port), val);
3734
3735 /*
3736 * On PORT_A we can have only eDP in SST mode. There the only reason
3737 * we need to set idle transmission mode is to work around a HW issue
3738 * where we enable the pipe while not in idle link-training mode.
3739 * In this case there is a requirement to wait for a minimum number of
3740 * idle patterns to be sent.
3741 */
3742 if (port == PORT_A)
3743 return;
3744
3745 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3746 1))
3747 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3748 }
3749
3750 static void
3751 intel_dp_link_down(struct intel_dp *intel_dp)
3752 {
3753 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3754 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3755 enum port port = intel_dig_port->port;
3756 struct drm_device *dev = intel_dig_port->base.base.dev;
3757 struct drm_i915_private *dev_priv = dev->dev_private;
3758 uint32_t DP = intel_dp->DP;
3759
3760 if (WARN_ON(HAS_DDI(dev)))
3761 return;
3762
3763 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3764 return;
3765
3766 DRM_DEBUG_KMS("\n");
3767
3768 if ((IS_GEN7(dev) && port == PORT_A) ||
3769 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3770 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3771 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3772 } else {
3773 if (IS_CHERRYVIEW(dev))
3774 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3775 else
3776 DP &= ~DP_LINK_TRAIN_MASK;
3777 DP |= DP_LINK_TRAIN_PAT_IDLE;
3778 }
3779 I915_WRITE(intel_dp->output_reg, DP);
3780 POSTING_READ(intel_dp->output_reg);
3781
3782 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3783 I915_WRITE(intel_dp->output_reg, DP);
3784 POSTING_READ(intel_dp->output_reg);
3785
3786 /*
3787 * HW workaround for IBX, we need to move the port
3788 * to transcoder A after disabling it to allow the
3789 * matching HDMI port to be enabled on transcoder A.
3790 */
3791 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3792 /*
3793 * We get CPU/PCH FIFO underruns on the other pipe when
3794 * doing the workaround. Sweep them under the rug.
3795 */
3796 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3797 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3798
3799 /* always enable with pattern 1 (as per spec) */
3800 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3801 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3802 I915_WRITE(intel_dp->output_reg, DP);
3803 POSTING_READ(intel_dp->output_reg);
3804
3805 DP &= ~DP_PORT_EN;
3806 I915_WRITE(intel_dp->output_reg, DP);
3807 POSTING_READ(intel_dp->output_reg);
3808
3809 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3810 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3811 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3812 }
3813
3814 msleep(intel_dp->panel_power_down_delay);
3815
3816 intel_dp->DP = DP;
3817 }
3818
3819 static bool
3820 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3821 {
3822 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3823 struct drm_device *dev = dig_port->base.base.dev;
3824 struct drm_i915_private *dev_priv = dev->dev_private;
3825 uint8_t rev;
3826
3827 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3828 sizeof(intel_dp->dpcd)) < 0)
3829 return false; /* aux transfer failed */
3830
3831 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3832
3833 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3834 return false; /* DPCD not present */
3835
3836 /* Check if the panel supports PSR */
3837 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3838 if (is_edp(intel_dp)) {
3839 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3840 intel_dp->psr_dpcd,
3841 sizeof(intel_dp->psr_dpcd));
3842 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3843 dev_priv->psr.sink_support = true;
3844 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3845 }
3846
3847 if (INTEL_INFO(dev)->gen >= 9 &&
3848 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3849 uint8_t frame_sync_cap;
3850
3851 dev_priv->psr.sink_support = true;
3852 intel_dp_dpcd_read_wake(&intel_dp->aux,
3853 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3854 &frame_sync_cap, 1);
3855 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3856 /* PSR2 needs frame sync as well */
3857 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3858 DRM_DEBUG_KMS("PSR2 %s on sink",
3859 dev_priv->psr.psr2_support ? "supported" : "not supported");
3860 }
3861 }
3862
3863 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3864 yesno(intel_dp_source_supports_hbr2(intel_dp)),
3865 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3866
3867 /* Intermediate frequency support */
3868 if (is_edp(intel_dp) &&
3869 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3870 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3871 (rev >= 0x03)) { /* eDP 1.4 or higher */
3872 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3873 int i;
3874
3875 intel_dp_dpcd_read_wake(&intel_dp->aux,
3876 DP_SUPPORTED_LINK_RATES,
3877 sink_rates,
3878 sizeof(sink_rates));
3879
3880 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3881 int val = le16_to_cpu(sink_rates[i]);
3882
3883 if (val == 0)
3884 break;
3885
3886 /* DPCD value is in 200 kHz units while drm clocks are stored in deca-kHz */
3887 intel_dp->sink_rates[i] = (val * 200) / 10;
3888 }
3889 intel_dp->num_sink_rates = i;
3890 }
3891
3892 intel_dp_print_rates(intel_dp);
3893
3894 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3895 DP_DWN_STRM_PORT_PRESENT))
3896 return true; /* native DP sink */
3897
3898 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3899 return true; /* no per-port downstream info */
3900
3901 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3902 intel_dp->downstream_ports,
3903 DP_MAX_DOWNSTREAM_PORTS) < 0)
3904 return false; /* downstream port status fetch failed */
3905
3906 return true;
3907 }
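/*
 * Worked example (editor's sketch) for the sink rate conversion above:
 * an eDP 1.4 sink advertising 1.62 GHz stores the little-endian value
 * 8100 (0x1FA4) in DP_SUPPORTED_LINK_RATES, i.e. 8100 units of 200 kHz.
 *
 *	int val = 8100;
 *	int rate = (val * 200) / 10;	// 162000, in deca-kHz
 */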
3908
3909 static void
3910 intel_dp_probe_oui(struct intel_dp *intel_dp)
3911 {
3912 u8 buf[3];
3913
3914 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3915 return;
3916
3917 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3918 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3919 buf[0], buf[1], buf[2]);
3920
3921 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3922 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3923 buf[0], buf[1], buf[2]);
3924 }
3925
3926 static bool
3927 intel_dp_probe_mst(struct intel_dp *intel_dp)
3928 {
3929 u8 buf[1];
3930
3931 if (!intel_dp->can_mst)
3932 return false;
3933
3934 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3935 return false;
3936
3937 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3938 if (buf[0] & DP_MST_CAP) {
3939 DRM_DEBUG_KMS("Sink is MST capable\n");
3940 intel_dp->is_mst = true;
3941 } else {
3942 DRM_DEBUG_KMS("Sink is not MST capable\n");
3943 intel_dp->is_mst = false;
3944 }
3945 }
3946
3947 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3948 return intel_dp->is_mst;
3949 }
3950
3951 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3952 {
3953 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3954 struct drm_device *dev = dig_port->base.base.dev;
3955 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3956 u8 buf;
3957 int ret = 0;
3958 int count = 0;
3959 int attempts = 10;
3960
3961 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3962 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3963 ret = -EIO;
3964 goto out;
3965 }
3966
3967 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3968 buf & ~DP_TEST_SINK_START) < 0) {
3969 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3970 ret = -EIO;
3971 goto out;
3972 }
3973
3974 do {
3975 intel_wait_for_vblank(dev, intel_crtc->pipe);
3976
3977 if (drm_dp_dpcd_readb(&intel_dp->aux,
3978 DP_TEST_SINK_MISC, &buf) < 0) {
3979 ret = -EIO;
3980 goto out;
3981 }
3982 count = buf & DP_TEST_COUNT_MASK;
3983 } while (--attempts && count);
3984
3985 if (attempts == 0) {
3986 DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
3987 ret = -ETIMEDOUT;
3988 }
3989
3990 out:
3991 hsw_enable_ips(intel_crtc);
3992 return ret;
3993 }
3994
3995 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3996 {
3997 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3998 struct drm_device *dev = dig_port->base.base.dev;
3999 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4000 u8 buf;
4001 int ret;
4002
4003 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4004 return -EIO;
4005
4006 if (!(buf & DP_TEST_CRC_SUPPORTED))
4007 return -ENOTTY;
4008
4009 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4010 return -EIO;
4011
4012 if (buf & DP_TEST_SINK_START) {
4013 ret = intel_dp_sink_crc_stop(intel_dp);
4014 if (ret)
4015 return ret;
4016 }
4017
4018 hsw_disable_ips(intel_crtc);
4019
4020 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4021 buf | DP_TEST_SINK_START) < 0) {
4022 hsw_enable_ips(intel_crtc);
4023 return -EIO;
4024 }
4025
4026 intel_wait_for_vblank(dev, intel_crtc->pipe);
4027 return 0;
4028 }
4029
4030 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4031 {
4032 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4033 struct drm_device *dev = dig_port->base.base.dev;
4034 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4035 u8 buf;
4036 int count, ret;
4037 int attempts = 6;
4038
4039 ret = intel_dp_sink_crc_start(intel_dp);
4040 if (ret)
4041 return ret;
4042
4043 do {
4044 intel_wait_for_vblank(dev, intel_crtc->pipe);
4045
4046 if (drm_dp_dpcd_readb(&intel_dp->aux,
4047 DP_TEST_SINK_MISC, &buf) < 0) {
4048 ret = -EIO;
4049 goto stop;
4050 }
4051 count = buf & DP_TEST_COUNT_MASK;
4052
4053 } while (--attempts && count == 0);
4054
4055 if (attempts == 0) {
4056 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4057 ret = -ETIMEDOUT;
4058 goto stop;
4059 }
4060
4061 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4062 ret = -EIO;
4063 goto stop;
4064 }
4065
4066 stop:
4067 intel_dp_sink_crc_stop(intel_dp);
4068 return ret;
4069 }
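/*
 * Example usage (a minimal sketch; a caller such as debugfs is expected
 * to ensure the pipe stays active for the duration):
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %6ph\n", crc);
 */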
4070
4071 static bool
4072 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4073 {
4074 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4075 DP_DEVICE_SERVICE_IRQ_VECTOR,
4076 sink_irq_vector, 1) == 1;
4077 }
4078
4079 static bool
4080 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4081 {
4082 int ret;
4083
4084 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4085 DP_SINK_COUNT_ESI,
4086 sink_irq_vector, 14);
4087 if (ret != 14)
4088 return false;
4089
4090 return true;
4091 }
4092
4093 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4094 {
4095 uint8_t test_result = DP_TEST_ACK;
4096 return test_result;
4097 }
4098
4099 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4100 {
4101 uint8_t test_result = DP_TEST_NAK;
4102 return test_result;
4103 }
4104
4105 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4106 {
4107 uint8_t test_result = DP_TEST_NAK;
4108 struct intel_connector *intel_connector = intel_dp->attached_connector;
4109 struct drm_connector *connector = &intel_connector->base;
4110
4111 if (intel_connector->detect_edid == NULL ||
4112 connector->edid_corrupt ||
4113 intel_dp->aux.i2c_defer_count > 6) {
4114 /* Check EDID read for NACKs, DEFERs and corruption
4115 * (DP CTS 1.2 Core r1.1)
4116 * 4.2.2.4 : Failed EDID read, I2C_NAK
4117 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4118 * 4.2.2.6 : EDID corruption detected
4119 * Use failsafe mode for all cases
4120 */
4121 if (intel_dp->aux.i2c_nack_count > 0 ||
4122 intel_dp->aux.i2c_defer_count > 0)
4123 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4124 intel_dp->aux.i2c_nack_count,
4125 intel_dp->aux.i2c_defer_count);
4126 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4127 } else {
4128 struct edid *block = intel_connector->detect_edid;
4129
4130 /* We have to write the checksum
4131 * of the last block read
4132 */
4133 block += intel_connector->detect_edid->extensions;
4134
4135 if (!drm_dp_dpcd_write(&intel_dp->aux,
4136 DP_TEST_EDID_CHECKSUM,
4137 &block->checksum,
4138 1))
4139 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4140
4141 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4142 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4143 }
4144
4145 /* Set test active flag here so userspace doesn't interrupt things */
4146 intel_dp->compliance_test_active = 1;
4147
4148 return test_result;
4149 }
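/*
 * Note (editor's sketch): struct edid is exactly EDID_LENGTH (128)
 * bytes, so "block += intel_connector->detect_edid->extensions" above
 * advances the pointer to the last 128-byte block that was read, whose
 * final byte is the checksum the sink expects in DP_TEST_EDID_CHECKSUM.
 */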
4150
4151 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4152 {
4153 uint8_t test_result = DP_TEST_NAK;
4154 return test_result;
4155 }
4156
4157 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4158 {
4159 uint8_t response = DP_TEST_NAK;
4160 uint8_t rxdata = 0;
4161 int status = 0;
4162
4163 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4164 if (status <= 0) {
4165 DRM_DEBUG_KMS("Could not read test request from sink\n");
4166 goto update_status;
4167 }
4168
4169 switch (rxdata) {
4170 case DP_TEST_LINK_TRAINING:
4171 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4172 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4173 response = intel_dp_autotest_link_training(intel_dp);
4174 break;
4175 case DP_TEST_LINK_VIDEO_PATTERN:
4176 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4177 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4178 response = intel_dp_autotest_video_pattern(intel_dp);
4179 break;
4180 case DP_TEST_LINK_EDID_READ:
4181 DRM_DEBUG_KMS("EDID test requested\n");
4182 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4183 response = intel_dp_autotest_edid(intel_dp);
4184 break;
4185 case DP_TEST_LINK_PHY_TEST_PATTERN:
4186 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4187 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4188 response = intel_dp_autotest_phy_pattern(intel_dp);
4189 break;
4190 default:
4191 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4192 break;
4193 }
4194
4195 update_status:
4196 status = drm_dp_dpcd_write(&intel_dp->aux,
4197 DP_TEST_RESPONSE,
4198 &response, 1);
4199 if (status <= 0)
4200 DRM_DEBUG_KMS("Could not write test response to sink\n");
4201 }
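/*
 * Note (editor's sketch): the handshake above is a pair of DPCD
 * accesses; DP_TEST_RESPONSE is written unconditionally, so the sink
 * gets a NAK (the initialized default) even when the DP_TEST_REQUEST
 * read fails or names an unknown test.
 */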
4202
4203 static int
4204 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4205 {
4206 bool bret;
4207
4208 if (intel_dp->is_mst) {
4209 u8 esi[16] = { 0 };
4210 int ret = 0;
4211 int retry;
4212 bool handled;
4213 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4214 go_again:
4215 if (bret) {
4216
4217 /* check link status - esi[10] = 0x200c */
4218 if (intel_dp->active_mst_links &&
4219 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4220 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4221 intel_dp_start_link_train(intel_dp);
4222 intel_dp_stop_link_train(intel_dp);
4223 }
4224
4225 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4226 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4227
4228 if (handled) {
4229 for (retry = 0; retry < 3; retry++) {
4230 int wret;
4231 wret = drm_dp_dpcd_write(&intel_dp->aux,
4232 DP_SINK_COUNT_ESI+1,
4233 &esi[1], 3);
4234 if (wret == 3) {
4235 break;
4236 }
4237 }
4238
4239 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4240 if (bret) {
4241 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4242 goto go_again;
4243 }
4244 } else
4245 ret = 0;
4246
4247 return ret;
4248 } else {
4249 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4250 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4251 intel_dp->is_mst = false;
4252 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4253 /* send a hotplug event */
4254 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4255 }
4256 }
4257 return -EINVAL;
4258 }
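/*
 * Note (editor's sketch): the 14-byte ESI block read above starts at
 * DP_SINK_COUNT_ESI (0x2002), so the link status checked at esi[10]
 * corresponds to DPCD 0x200c, and the event bits acked back with
 * drm_dp_dpcd_write(..., DP_SINK_COUNT_ESI + 1, &esi[1], 3) cover
 * DPCD 0x2003..0x2005.
 */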
4259
4260 /*
4261 * According to DP spec
4262 * 5.1.2:
4263 * 1. Read DPCD
4264 * 2. Configure link according to Receiver Capabilities
4265 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4266 * 4. Check link status on receipt of hot-plug interrupt
4267 */
4268 static void
4269 intel_dp_check_link_status(struct intel_dp *intel_dp)
4270 {
4271 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4272 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4273 u8 sink_irq_vector;
4274 u8 link_status[DP_LINK_STATUS_SIZE];
4275
4276 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4277
4278 /*
4279 * Clear compliance test variables so that fresh values can be
4280 * captured for the next automated test request.
4281 */
4282 intel_dp->compliance_test_active = 0;
4283 intel_dp->compliance_test_type = 0;
4284 intel_dp->compliance_test_data = 0;
4285
4286 if (!intel_encoder->base.crtc)
4287 return;
4288
4289 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4290 return;
4291
4292 /* Try to read receiver status if the link appears to be up */
4293 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4294 return;
4295 }
4296
4297 /* Now read the DPCD to see if it's actually running */
4298 if (!intel_dp_get_dpcd(intel_dp)) {
4299 return;
4300 }
4301
4302 /* Try to read the source of the interrupt */
4303 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4304 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4305 /* Clear interrupt source */
4306 drm_dp_dpcd_writeb(&intel_dp->aux,
4307 DP_DEVICE_SERVICE_IRQ_VECTOR,
4308 sink_irq_vector);
4309
4310 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4311 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4312 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4313 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4314 }
4315
4316 /* if link training is requested we should perform it always */
4317 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4318 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4319 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4320 intel_encoder->base.name);
4321 intel_dp_start_link_train(intel_dp);
4322 intel_dp_stop_link_train(intel_dp);
4323 }
4324 }
4325
4326 /* XXX this is probably wrong for multiple downstream ports */
4327 static enum drm_connector_status
4328 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4329 {
4330 uint8_t *dpcd = intel_dp->dpcd;
4331 uint8_t type;
4332
4333 if (!intel_dp_get_dpcd(intel_dp))
4334 return connector_status_disconnected;
4335
4336 /* if there's no downstream port, we're done */
4337 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4338 return connector_status_connected;
4339
4340 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4341 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4342 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4343 uint8_t reg;
4344
4345 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4346 &reg, 1) < 0)
4347 return connector_status_unknown;
4348
4349 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4350 : connector_status_disconnected;
4351 }
4352
4353 /* If no HPD, poke DDC gently */
4354 if (drm_probe_ddc(&intel_dp->aux.ddc))
4355 return connector_status_connected;
4356
4357 /* Well we tried, say unknown for unreliable port types */
4358 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4359 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4360 if (type == DP_DS_PORT_TYPE_VGA ||
4361 type == DP_DS_PORT_TYPE_NON_EDID)
4362 return connector_status_unknown;
4363 } else {
4364 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4365 DP_DWN_STRM_PORT_TYPE_MASK;
4366 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4367 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4368 return connector_status_unknown;
4369 }
4370
4371 /* Anything else is out of spec, warn and ignore */
4372 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4373 return connector_status_disconnected;
4374 }
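/*
 * Summary of the decision tree above (editor's note): no DPCD means
 * disconnected; no branch device means connected; an HPD-capable branch
 * (DPCD >= 1.1) is trusted via SINK_COUNT; otherwise DDC is probed, and
 * VGA/NON_EDID downstream ports that fail the probe are reported as
 * unknown rather than disconnected.
 */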
4375
4376 static enum drm_connector_status
4377 edp_detect(struct intel_dp *intel_dp)
4378 {
4379 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4380 enum drm_connector_status status;
4381
4382 status = intel_panel_detect(dev);
4383 if (status == connector_status_unknown)
4384 status = connector_status_connected;
4385
4386 return status;
4387 }
4388
4389 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4390 struct intel_digital_port *port)
4391 {
4392 u32 bit;
4393
4394 switch (port->port) {
4395 case PORT_A:
4396 return true;
4397 case PORT_B:
4398 bit = SDE_PORTB_HOTPLUG;
4399 break;
4400 case PORT_C:
4401 bit = SDE_PORTC_HOTPLUG;
4402 break;
4403 case PORT_D:
4404 bit = SDE_PORTD_HOTPLUG;
4405 break;
4406 default:
4407 MISSING_CASE(port->port);
4408 return false;
4409 }
4410
4411 return I915_READ(SDEISR) & bit;
4412 }
4413
4414 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4415 struct intel_digital_port *port)
4416 {
4417 u32 bit;
4418
4419 switch (port->port) {
4420 case PORT_A:
4421 return true;
4422 case PORT_B:
4423 bit = SDE_PORTB_HOTPLUG_CPT;
4424 break;
4425 case PORT_C:
4426 bit = SDE_PORTC_HOTPLUG_CPT;
4427 break;
4428 case PORT_D:
4429 bit = SDE_PORTD_HOTPLUG_CPT;
4430 break;
4431 case PORT_E:
4432 bit = SDE_PORTE_HOTPLUG_SPT;
4433 break;
4434 default:
4435 MISSING_CASE(port->port);
4436 return false;
4437 }
4438
4439 return I915_READ(SDEISR) & bit;
4440 }
4441
4442 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4443 struct intel_digital_port *port)
4444 {
4445 u32 bit;
4446
4447 switch (port->port) {
4448 case PORT_B:
4449 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4450 break;
4451 case PORT_C:
4452 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4453 break;
4454 case PORT_D:
4455 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4456 break;
4457 default:
4458 MISSING_CASE(port->port);
4459 return false;
4460 }
4461
4462 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4463 }
4464
4465 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4466 struct intel_digital_port *port)
4467 {
4468 u32 bit;
4469
4470 switch (port->port) {
4471 case PORT_B:
4472 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4473 break;
4474 case PORT_C:
4475 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4476 break;
4477 case PORT_D:
4478 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4479 break;
4480 default:
4481 MISSING_CASE(port->port);
4482 return false;
4483 }
4484
4485 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4486 }
4487
4488 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4489 struct intel_digital_port *intel_dig_port)
4490 {
4491 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4492 enum port port;
4493 u32 bit;
4494
4495 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4496 switch (port) {
4497 case PORT_A:
4498 bit = BXT_DE_PORT_HP_DDIA;
4499 break;
4500 case PORT_B:
4501 bit = BXT_DE_PORT_HP_DDIB;
4502 break;
4503 case PORT_C:
4504 bit = BXT_DE_PORT_HP_DDIC;
4505 break;
4506 default:
4507 MISSING_CASE(port);
4508 return false;
4509 }
4510
4511 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4512 }
4513
4514 /**
4515 * intel_digital_port_connected - is the specified port connected?
4516 * @dev_priv: i915 private structure
4517 * @port: the port to test
4518 *
4519 * Return %true if @port is connected, %false otherwise.
4520 */
4521 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4522 struct intel_digital_port *port)
4523 {
4524 if (HAS_PCH_IBX(dev_priv))
4525 return ibx_digital_port_connected(dev_priv, port);
4526 else if (HAS_PCH_SPLIT(dev_priv))
4527 return cpt_digital_port_connected(dev_priv, port);
4528 else if (IS_BROXTON(dev_priv))
4529 return bxt_digital_port_connected(dev_priv, port);
4530 else if (IS_VALLEYVIEW(dev_priv))
4531 return vlv_digital_port_connected(dev_priv, port);
4532 else
4533 return g4x_digital_port_connected(dev_priv, port);
4534 }
4535
4536 static struct edid *
4537 intel_dp_get_edid(struct intel_dp *intel_dp)
4538 {
4539 struct intel_connector *intel_connector = intel_dp->attached_connector;
4540
4541 /* use cached edid if we have one */
4542 if (intel_connector->edid) {
4543 /* invalid edid */
4544 if (IS_ERR(intel_connector->edid))
4545 return NULL;
4546
4547 return drm_edid_duplicate(intel_connector->edid);
4548 } else
4549 return drm_get_edid(&intel_connector->base,
4550 &intel_dp->aux.ddc);
4551 }
4552
4553 static void
4554 intel_dp_set_edid(struct intel_dp *intel_dp)
4555 {
4556 struct intel_connector *intel_connector = intel_dp->attached_connector;
4557 struct edid *edid;
4558
4559 edid = intel_dp_get_edid(intel_dp);
4560 intel_connector->detect_edid = edid;
4561
4562 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4563 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4564 else
4565 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4566 }
4567
4568 static void
4569 intel_dp_unset_edid(struct intel_dp *intel_dp)
4570 {
4571 struct intel_connector *intel_connector = intel_dp->attached_connector;
4572
4573 kfree(intel_connector->detect_edid);
4574 intel_connector->detect_edid = NULL;
4575
4576 intel_dp->has_audio = false;
4577 }
4578
4579 static enum drm_connector_status
4580 intel_dp_detect(struct drm_connector *connector, bool force)
4581 {
4582 struct intel_dp *intel_dp = intel_attached_dp(connector);
4583 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4584 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4585 struct drm_device *dev = connector->dev;
4586 enum drm_connector_status status;
4587 enum intel_display_power_domain power_domain;
4588 bool ret;
4589 u8 sink_irq_vector;
4590
4591 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4592 connector->base.id, connector->name);
4593 intel_dp_unset_edid(intel_dp);
4594
4595 if (intel_dp->is_mst) {
4596 /* MST devices are disconnected from a monitor POV */
4597 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4598 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4599 return connector_status_disconnected;
4600 }
4601
4602 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4603 intel_display_power_get(to_i915(dev), power_domain);
4604
4605 /* Can't disconnect eDP, but you can close the lid... */
4606 if (is_edp(intel_dp))
4607 status = edp_detect(intel_dp);
4608 else if (intel_digital_port_connected(to_i915(dev),
4609 dp_to_dig_port(intel_dp)))
4610 status = intel_dp_detect_dpcd(intel_dp);
4611 else
4612 status = connector_status_disconnected;
4613
4614 if (status != connector_status_connected) {
4615 intel_dp->compliance_test_active = 0;
4616 intel_dp->compliance_test_type = 0;
4617 intel_dp->compliance_test_data = 0;
4618
4619 goto out;
4620 }
4621
4622 intel_dp_probe_oui(intel_dp);
4623
4624 ret = intel_dp_probe_mst(intel_dp);
4625 if (ret) {
4626 /* if we are in MST mode then this connector
4627 * won't appear connected or have anything with EDID on it */
4628 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4629 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4630 status = connector_status_disconnected;
4631 goto out;
4632 }
4633
4634 /*
4635 * Clear the NACK and defer counts so that exact values are
4636 * captured during the EDID read; Compliance tests 4.2.2.4
4637 * and 4.2.2.5 require them.
4638 */
4639 intel_dp->aux.i2c_nack_count = 0;
4640 intel_dp->aux.i2c_defer_count = 0;
4641
4642 intel_dp_set_edid(intel_dp);
4643
4644 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4645 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4646 status = connector_status_connected;
4647
4648 /* Try to read the source of the interrupt */
4649 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4650 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4651 /* Clear interrupt source */
4652 drm_dp_dpcd_writeb(&intel_dp->aux,
4653 DP_DEVICE_SERVICE_IRQ_VECTOR,
4654 sink_irq_vector);
4655
4656 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4657 intel_dp_handle_test_request(intel_dp);
4658 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4659 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4660 }
4661
4662 out:
4663 intel_display_power_put(to_i915(dev), power_domain);
4664 return status;
4665 }
4666
4667 static void
4668 intel_dp_force(struct drm_connector *connector)
4669 {
4670 struct intel_dp *intel_dp = intel_attached_dp(connector);
4671 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4672 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4673 enum intel_display_power_domain power_domain;
4674
4675 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4676 connector->base.id, connector->name);
4677 intel_dp_unset_edid(intel_dp);
4678
4679 if (connector->status != connector_status_connected)
4680 return;
4681
4682 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4683 intel_display_power_get(dev_priv, power_domain);
4684
4685 intel_dp_set_edid(intel_dp);
4686
4687 intel_display_power_put(dev_priv, power_domain);
4688
4689 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4690 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4691 }
4692
4693 static int intel_dp_get_modes(struct drm_connector *connector)
4694 {
4695 struct intel_connector *intel_connector = to_intel_connector(connector);
4696 struct edid *edid;
4697
4698 edid = intel_connector->detect_edid;
4699 if (edid) {
4700 int ret = intel_connector_update_modes(connector, edid);
4701 if (ret)
4702 return ret;
4703 }
4704
4705 /* if eDP has no EDID, fall back to fixed mode */
4706 if (is_edp(intel_attached_dp(connector)) &&
4707 intel_connector->panel.fixed_mode) {
4708 struct drm_display_mode *mode;
4709
4710 mode = drm_mode_duplicate(connector->dev,
4711 intel_connector->panel.fixed_mode);
4712 if (mode) {
4713 drm_mode_probed_add(connector, mode);
4714 return 1;
4715 }
4716 }
4717
4718 return 0;
4719 }
4720
4721 static bool
4722 intel_dp_detect_audio(struct drm_connector *connector)
4723 {
4724 bool has_audio = false;
4725 struct edid *edid;
4726
4727 edid = to_intel_connector(connector)->detect_edid;
4728 if (edid)
4729 has_audio = drm_detect_monitor_audio(edid);
4730
4731 return has_audio;
4732 }
4733
4734 static int
4735 intel_dp_set_property(struct drm_connector *connector,
4736 struct drm_property *property,
4737 uint64_t val)
4738 {
4739 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4740 struct intel_connector *intel_connector = to_intel_connector(connector);
4741 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4742 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4743 int ret;
4744
4745 ret = drm_object_property_set_value(&connector->base, property, val);
4746 if (ret)
4747 return ret;
4748
4749 if (property == dev_priv->force_audio_property) {
4750 int i = val;
4751 bool has_audio;
4752
4753 if (i == intel_dp->force_audio)
4754 return 0;
4755
4756 intel_dp->force_audio = i;
4757
4758 if (i == HDMI_AUDIO_AUTO)
4759 has_audio = intel_dp_detect_audio(connector);
4760 else
4761 has_audio = (i == HDMI_AUDIO_ON);
4762
4763 if (has_audio == intel_dp->has_audio)
4764 return 0;
4765
4766 intel_dp->has_audio = has_audio;
4767 goto done;
4768 }
4769
4770 if (property == dev_priv->broadcast_rgb_property) {
4771 bool old_auto = intel_dp->color_range_auto;
4772 bool old_range = intel_dp->limited_color_range;
4773
4774 switch (val) {
4775 case INTEL_BROADCAST_RGB_AUTO:
4776 intel_dp->color_range_auto = true;
4777 break;
4778 case INTEL_BROADCAST_RGB_FULL:
4779 intel_dp->color_range_auto = false;
4780 intel_dp->limited_color_range = false;
4781 break;
4782 case INTEL_BROADCAST_RGB_LIMITED:
4783 intel_dp->color_range_auto = false;
4784 intel_dp->limited_color_range = true;
4785 break;
4786 default:
4787 return -EINVAL;
4788 }
4789
4790 if (old_auto == intel_dp->color_range_auto &&
4791 old_range == intel_dp->limited_color_range)
4792 return 0;
4793
4794 goto done;
4795 }
4796
4797 if (is_edp(intel_dp) &&
4798 property == connector->dev->mode_config.scaling_mode_property) {
4799 if (val == DRM_MODE_SCALE_NONE) {
4800 DRM_DEBUG_KMS("no scaling not supported\n");
4801 return -EINVAL;
4802 }
4803
4804 if (intel_connector->panel.fitting_mode == val) {
4805 /* the eDP scaling property is not changed */
4806 return 0;
4807 }
4808 intel_connector->panel.fitting_mode = val;
4809
4810 goto done;
4811 }
4812
4813 return -EINVAL;
4814
4815 done:
4816 if (intel_encoder->base.crtc)
4817 intel_crtc_restore_mode(intel_encoder->base.crtc);
4818
4819 return 0;
4820 }
4821
4822 static void
4823 intel_dp_connector_destroy(struct drm_connector *connector)
4824 {
4825 struct intel_connector *intel_connector = to_intel_connector(connector);
4826
4827 kfree(intel_connector->detect_edid);
4828
4829 if (!IS_ERR_OR_NULL(intel_connector->edid))
4830 kfree(intel_connector->edid);
4831
4832 /* Can't call is_edp() since the encoder may have been destroyed
4833 * already. */
4834 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4835 intel_panel_fini(&intel_connector->panel);
4836
4837 drm_connector_cleanup(connector);
4838 kfree(connector);
4839 }
4840
4841 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4842 {
4843 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4844 struct intel_dp *intel_dp = &intel_dig_port->dp;
4845
4846 intel_dp_aux_fini(intel_dp);
4847 intel_dp_mst_encoder_cleanup(intel_dig_port);
4848 if (is_edp(intel_dp)) {
4849 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4850 /*
4851 * vdd might still be enabled due to the delayed vdd off.
4852 * Make sure vdd is actually turned off here.
4853 */
4854 pps_lock(intel_dp);
4855 edp_panel_vdd_off_sync(intel_dp);
4856 pps_unlock(intel_dp);
4857
4858 if (intel_dp->edp_notifier.notifier_call) {
4859 unregister_reboot_notifier(&intel_dp->edp_notifier);
4860 intel_dp->edp_notifier.notifier_call = NULL;
4861 }
4862 }
4863 drm_encoder_cleanup(encoder);
4864 kfree(intel_dig_port);
4865 }
4866
4867 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4868 {
4869 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4870
4871 if (!is_edp(intel_dp))
4872 return;
4873
4874 /*
4875 * vdd might still be enabled due to the delayed vdd off.
4876 * Make sure vdd is actually turned off here.
4877 */
4878 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4879 pps_lock(intel_dp);
4880 edp_panel_vdd_off_sync(intel_dp);
4881 pps_unlock(intel_dp);
4882 }
4883
4884 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4885 {
4886 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4887 struct drm_device *dev = intel_dig_port->base.base.dev;
4888 struct drm_i915_private *dev_priv = dev->dev_private;
4889 enum intel_display_power_domain power_domain;
4890
4891 lockdep_assert_held(&dev_priv->pps_mutex);
4892
4893 if (!edp_have_panel_vdd(intel_dp))
4894 return;
4895
4896 /*
4897 * The VDD bit needs a power domain reference, so if the bit is
4898 * already enabled when we boot or resume, grab this reference and
4899 * schedule a vdd off, so we don't hold on to the reference
4900 * indefinitely.
4901 */
4902 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4903 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4904 intel_display_power_get(dev_priv, power_domain);
4905
4906 edp_panel_vdd_schedule_off(intel_dp);
4907 }
4908
4909 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4910 {
4911 struct intel_dp *intel_dp;
4912
4913 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4914 return;
4915
4916 intel_dp = enc_to_intel_dp(encoder);
4917
4918 pps_lock(intel_dp);
4919
4920 /*
4921 * Read out the current power sequencer assignment,
4922 * in case the BIOS did something with it.
4923 */
4924 if (IS_VALLEYVIEW(encoder->dev))
4925 vlv_initial_power_sequencer_setup(intel_dp);
4926
4927 intel_edp_panel_vdd_sanitize(intel_dp);
4928
4929 pps_unlock(intel_dp);
4930 }
4931
4932 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4933 .dpms = drm_atomic_helper_connector_dpms,
4934 .detect = intel_dp_detect,
4935 .force = intel_dp_force,
4936 .fill_modes = drm_helper_probe_single_connector_modes,
4937 .set_property = intel_dp_set_property,
4938 .atomic_get_property = intel_connector_atomic_get_property,
4939 .destroy = intel_dp_connector_destroy,
4940 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4941 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4942 };
4943
4944 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4945 .get_modes = intel_dp_get_modes,
4946 .mode_valid = intel_dp_mode_valid,
4947 .best_encoder = intel_best_encoder,
4948 };
4949
4950 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4951 .reset = intel_dp_encoder_reset,
4952 .destroy = intel_dp_encoder_destroy,
4953 };
4954
4955 enum irqreturn
4956 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4957 {
4958 struct intel_dp *intel_dp = &intel_dig_port->dp;
4959 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4960 struct drm_device *dev = intel_dig_port->base.base.dev;
4961 struct drm_i915_private *dev_priv = dev->dev_private;
4962 enum intel_display_power_domain power_domain;
4963 enum irqreturn ret = IRQ_NONE;
4964
4965 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4966 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
4967 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4968
4969 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4970 /*
4971 * vdd off can generate a long pulse on eDP which
4972 * would require vdd on to handle it, and thus we
4973 * would end up in an endless cycle of
4974 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4975 */
4976 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4977 port_name(intel_dig_port->port));
4978 return IRQ_HANDLED;
4979 }
4980
4981 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4982 port_name(intel_dig_port->port),
4983 long_hpd ? "long" : "short");
4984
4985 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4986 intel_display_power_get(dev_priv, power_domain);
4987
4988 if (long_hpd) {
4989 /* indicate that we need to restart link training */
4990 intel_dp->train_set_valid = false;
4991
4992 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4993 goto mst_fail;
4994
4995 if (!intel_dp_get_dpcd(intel_dp)) {
4996 goto mst_fail;
4997 }
4998
4999 intel_dp_probe_oui(intel_dp);
5000
5001 if (!intel_dp_probe_mst(intel_dp)) {
5002 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5003 intel_dp_check_link_status(intel_dp);
5004 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5005 goto mst_fail;
5006 }
5007 } else {
5008 if (intel_dp->is_mst) {
5009 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5010 goto mst_fail;
5011 }
5012
5013 if (!intel_dp->is_mst) {
5014 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5015 intel_dp_check_link_status(intel_dp);
5016 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5017 }
5018 }
5019
5020 ret = IRQ_HANDLED;
5021
5022 goto put_power;
5023 mst_fail:
5024 /* if we were in MST mode, and device is not there get out of MST mode */
5025 if (intel_dp->is_mst) {
5026 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5027 intel_dp->is_mst = false;
5028 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5029 }
5030 put_power:
5031 intel_display_power_put(dev_priv, power_domain);
5032
5033 return ret;
5034 }
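/*
 * Note (editor's sketch): long pulses mean the sink (re)appeared, so
 * the DPCD is re-read and MST is re-probed; short pulses are serviced
 * in place, either as MST ESIs or as an SST link status check. Any
 * failure along the long-pulse path falls through to mst_fail, which
 * tears down a stale MST topology before the power reference is
 * dropped.
 */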
5035
5036 /* check the VBT to see whether the eDP is on another port */
5037 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5038 {
5039 struct drm_i915_private *dev_priv = dev->dev_private;
5040 union child_device_config *p_child;
5041 int i;
5042 static const short port_mapping[] = {
5043 [PORT_B] = DVO_PORT_DPB,
5044 [PORT_C] = DVO_PORT_DPC,
5045 [PORT_D] = DVO_PORT_DPD,
5046 [PORT_E] = DVO_PORT_DPE,
5047 };
5048
5049 /*
5050 * eDP is not supported on g4x, so bail out early just
5051 * for a bit of extra safety in case the VBT is bonkers.
5052 */
5053 if (INTEL_INFO(dev)->gen < 5)
5054 return false;
5055
5056 if (port == PORT_A)
5057 return true;
5058
5059 if (!dev_priv->vbt.child_dev_num)
5060 return false;
5061
5062 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5063 p_child = dev_priv->vbt.child_dev + i;
5064
5065 if (p_child->common.dvo_port == port_mapping[port] &&
5066 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5067 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5068 return true;
5069 }
5070 return false;
5071 }
5072
5073 void
5074 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5075 {
5076 struct intel_connector *intel_connector = to_intel_connector(connector);
5077
5078 intel_attach_force_audio_property(connector);
5079 intel_attach_broadcast_rgb_property(connector);
5080 intel_dp->color_range_auto = true;
5081
5082 if (is_edp(intel_dp)) {
5083 drm_mode_create_scaling_mode_property(connector->dev);
5084 drm_object_attach_property(
5085 &connector->base,
5086 connector->dev->mode_config.scaling_mode_property,
5087 DRM_MODE_SCALE_ASPECT);
5088 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5089 }
5090 }
5091
5092 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5093 {
5094 intel_dp->last_power_cycle = jiffies;
5095 intel_dp->last_power_on = jiffies;
5096 intel_dp->last_backlight_off = jiffies;
5097 }
5098
5099 static void
5100 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5101 struct intel_dp *intel_dp)
5102 {
5103 struct drm_i915_private *dev_priv = dev->dev_private;
5104 struct edp_power_seq cur, vbt, spec,
5105 *final = &intel_dp->pps_delays;
5106 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5107 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5108
5109 lockdep_assert_held(&dev_priv->pps_mutex);
5110
5111 /* already initialized? */
5112 if (final->t11_t12 != 0)
5113 return;
5114
5115 if (IS_BROXTON(dev)) {
5116 /*
5117 * TODO: BXT has 2 sets of PPS registers.
5118 * The correct register for Broxton needs to be identified
5119 * using the VBT; hardcoded for now.
5120 */
5121 pp_ctrl_reg = BXT_PP_CONTROL(0);
5122 pp_on_reg = BXT_PP_ON_DELAYS(0);
5123 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5124 } else if (HAS_PCH_SPLIT(dev)) {
5125 pp_ctrl_reg = PCH_PP_CONTROL;
5126 pp_on_reg = PCH_PP_ON_DELAYS;
5127 pp_off_reg = PCH_PP_OFF_DELAYS;
5128 pp_div_reg = PCH_PP_DIVISOR;
5129 } else {
5130 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5131
5132 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5133 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5134 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5135 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5136 }
5137
5138 /* Workaround: Need to write PP_CONTROL with the unlock key as
5139 * the very first thing. */
5140 pp_ctl = ironlake_get_pp_control(intel_dp);
5141
5142 pp_on = I915_READ(pp_on_reg);
5143 pp_off = I915_READ(pp_off_reg);
5144 if (!IS_BROXTON(dev)) {
5145 I915_WRITE(pp_ctrl_reg, pp_ctl);
5146 pp_div = I915_READ(pp_div_reg);
5147 }
5148
5149 /* Pull timing values out of registers */
5150 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5151 PANEL_POWER_UP_DELAY_SHIFT;
5152
5153 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5154 PANEL_LIGHT_ON_DELAY_SHIFT;
5155
5156 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5157 PANEL_LIGHT_OFF_DELAY_SHIFT;
5158
5159 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5160 PANEL_POWER_DOWN_DELAY_SHIFT;
5161
5162 if (IS_BROXTON(dev)) {
5163 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5164 BXT_POWER_CYCLE_DELAY_SHIFT;
5165 if (tmp > 0)
5166 cur.t11_t12 = (tmp - 1) * 1000;
5167 else
5168 cur.t11_t12 = 0;
5169 } else {
5170 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5171 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5172 }
5173
5174 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5175 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5176
5177 vbt = dev_priv->vbt.edp_pps;
5178
5179 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5180 * our hw here, which are all in 100usec. */
5181 spec.t1_t3 = 210 * 10;
5182 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5183 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5184 spec.t10 = 500 * 10;
5185 /* This one is special and actually in units of 100ms, but zero
5186 * based in the hw (so we need to add 100 ms). But the sw vbt
5187 * table multiplies it by 1000 to make it in units of 100usec,
5188 * too. */
5189 spec.t11_t12 = (510 + 100) * 10;
5190
5191 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5192 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5193
5194 /* Use the max of the register settings and vbt. If both are
5195 * unset, fall back to the spec limits. */
5196 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5197 spec.field : \
5198 max(cur.field, vbt.field))
5199 assign_final(t1_t3);
5200 assign_final(t8);
5201 assign_final(t9);
5202 assign_final(t10);
5203 assign_final(t11_t12);
5204 #undef assign_final
5205
5206 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5207 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5208 intel_dp->backlight_on_delay = get_delay(t8);
5209 intel_dp->backlight_off_delay = get_delay(t9);
5210 intel_dp->panel_power_down_delay = get_delay(t10);
5211 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5212 #undef get_delay
5213
5214 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5215 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5216 intel_dp->panel_power_cycle_delay);
5217
5218 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5219 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5220 }
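/*
 * Worked example (editor's sketch) of the merge above: with both the
 * register and VBT values unset, T1+T3 falls back to the eDP spec
 * limit of 2100 (in 100 usec units), and get_delay() rounds it up to
 * milliseconds:
 *
 *	final->t1_t3 = 2100;	// max(cur, vbt) == 0 -> spec value
 *	panel_power_up_delay = DIV_ROUND_UP(2100, 10);	// 210 ms
 */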
5221
5222 static void
5223 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5224 struct intel_dp *intel_dp)
5225 {
5226 struct drm_i915_private *dev_priv = dev->dev_private;
5227 u32 pp_on, pp_off, pp_div, port_sel = 0;
5228 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5229 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5230 enum port port = dp_to_dig_port(intel_dp)->port;
5231 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5232
5233 lockdep_assert_held(&dev_priv->pps_mutex);
5234
5235 if (IS_BROXTON(dev)) {
5236 /*
5237 * TODO: BXT has 2 sets of PPS registers.
5238 * The correct register for Broxton needs to be identified
5239 * using the VBT; hardcoded for now.
5240 */
5241 pp_ctrl_reg = BXT_PP_CONTROL(0);
5242 pp_on_reg = BXT_PP_ON_DELAYS(0);
5243 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5244
5245 } else if (HAS_PCH_SPLIT(dev)) {
5246 pp_on_reg = PCH_PP_ON_DELAYS;
5247 pp_off_reg = PCH_PP_OFF_DELAYS;
5248 pp_div_reg = PCH_PP_DIVISOR;
5249 } else {
5250 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5251
5252 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5253 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5254 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5255 }
5256
5257 /*
5258 * And finally store the new values in the power sequencer. The
5259 * backlight delays are set to 1 because we do manual waits on them. For
5260 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5261 * we'll end up waiting for the backlight off delay twice: once when we
5262 * do the manual sleep, and once when we disable the panel and wait for
5263 * the PP_STATUS bit to become zero.
5264 */
5265 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5266 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5267 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5268 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5269 /* Compute the divisor for the pp clock, simply match the Bspec
5270 * formula. */
5271 if (IS_BROXTON(dev)) {
5272 pp_div = I915_READ(pp_ctrl_reg);
5273 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5274 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5275 << BXT_POWER_CYCLE_DELAY_SHIFT);
5276 } else {
5277 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5278 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5279 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5280 }
5281
5282 /* Haswell doesn't have any port selection bits for the panel
5283 * power sequencer any more. */
5284 if (IS_VALLEYVIEW(dev)) {
5285 port_sel = PANEL_PORT_SELECT_VLV(port);
5286 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5287 if (port == PORT_A)
5288 port_sel = PANEL_PORT_SELECT_DPA;
5289 else
5290 port_sel = PANEL_PORT_SELECT_DPD;
5291 }
5292
5293 pp_on |= port_sel;
5294
5295 I915_WRITE(pp_on_reg, pp_on);
5296 I915_WRITE(pp_off_reg, pp_off);
5297 if (IS_BROXTON(dev))
5298 I915_WRITE(pp_ctrl_reg, pp_div);
5299 else
5300 I915_WRITE(pp_div_reg, pp_div);
5301
5302 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5303 I915_READ(pp_on_reg),
5304 I915_READ(pp_off_reg),
5305 IS_BROXTON(dev) ?
5306 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5307 I915_READ(pp_div_reg));
5308 }
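/*
 * Worked example (editor's sketch) of the power cycle field above: a
 * t11_t12 of 6100 (100 usec units, i.e. 610 ms) is programmed as
 * DIV_ROUND_UP(6100, 1000) = 7 on non-Broxton platforms, since the
 * hardware field counts in 100 ms steps.
 */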
5309
5310 /**
5311 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5312 * @dev: DRM device
5313 * @refresh_rate: RR to be programmed
5314 *
5315 * This function gets called when refresh rate (RR) has to be changed from
5316 * one frequency to another. Switches can be between high and low RR
5317 * supported by the panel or to any other RR based on media playback (in
5318 * this case, RR value needs to be passed from user space).
5319 *
5320 * The caller of this function needs to take a lock on dev_priv->drrs.
5321 */
5322 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5323 {
5324 struct drm_i915_private *dev_priv = dev->dev_private;
5325 struct intel_encoder *encoder;
5326 struct intel_digital_port *dig_port = NULL;
5327 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5328 struct intel_crtc_state *config = NULL;
5329 struct intel_crtc *intel_crtc = NULL;
5330 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5331
5332 if (refresh_rate <= 0) {
5333 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5334 return;
5335 }
5336
5337 if (intel_dp == NULL) {
5338 DRM_DEBUG_KMS("DRRS not supported.\n");
5339 return;
5340 }
5341
5342 /*
5343 * FIXME: This needs proper synchronization with psr state for some
5344 * platforms that cannot have PSR and DRRS enabled at the same time.
5345 */
5346
5347 dig_port = dp_to_dig_port(intel_dp);
5348 encoder = &dig_port->base;
5349 intel_crtc = to_intel_crtc(encoder->base.crtc);
5350
5351 if (!intel_crtc) {
5352 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5353 return;
5354 }
5355
5356 config = intel_crtc->config;
5357
5358 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5359 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5360 return;
5361 }
5362
5363 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5364 refresh_rate)
5365 index = DRRS_LOW_RR;
5366
5367 if (index == dev_priv->drrs.refresh_rate_type) {
5368 DRM_DEBUG_KMS(
5369 "DRRS requested for previously set RR...ignoring\n");
5370 return;
5371 }
5372
5373 if (!intel_crtc->active) {
5374 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5375 return;
5376 }
5377
5378 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5379 switch (index) {
5380 case DRRS_HIGH_RR:
5381 intel_dp_set_m_n(intel_crtc, M1_N1);
5382 break;
5383 case DRRS_LOW_RR:
5384 intel_dp_set_m_n(intel_crtc, M2_N2);
5385 break;
5386 case DRRS_MAX_RR:
5387 default:
5388 DRM_ERROR("Unsupported refreshrate type\n");
5389 }
5390 } else if (INTEL_INFO(dev)->gen > 6) {
5391 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5392 u32 val;
5393
5394 val = I915_READ(reg);
5395 if (index > DRRS_HIGH_RR) {
5396 if (IS_VALLEYVIEW(dev))
5397 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5398 else
5399 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5400 } else {
5401 if (IS_VALLEYVIEW(dev))
5402 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5403 else
5404 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5405 }
5406 I915_WRITE(reg, val);
5407 }
5408
5409 dev_priv->drrs.refresh_rate_type = index;
5410
5411 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5412 }
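/*
 * Note (editor's sketch): two mechanisms are used above. Gen8+ (except
 * CHV) switches refresh rate by selecting between the M1/N1 and M2/N2
 * link M/N values, while the remaining gen7+ platforms toggle the
 * EDP_RR_MODE_SWITCH bit in PIPECONF (with a VLV-specific variant of
 * that bit).
 */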
5413
5414 /**
5415 * intel_edp_drrs_enable - init drrs struct if supported
5416 * @intel_dp: DP struct
5417 *
5418 * Initializes frontbuffer_bits and drrs.dp
5419 */
5420 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5421 {
5422 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5423 struct drm_i915_private *dev_priv = dev->dev_private;
5424 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5425 struct drm_crtc *crtc = dig_port->base.base.crtc;
5426 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5427
5428 if (!intel_crtc->config->has_drrs) {
5429 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5430 return;
5431 }
5432
5433 mutex_lock(&dev_priv->drrs.mutex);
5434 if (WARN_ON(dev_priv->drrs.dp)) {
5435 DRM_ERROR("DRRS already enabled\n");
5436 goto unlock;
5437 }
5438
5439 dev_priv->drrs.busy_frontbuffer_bits = 0;
5440
5441 dev_priv->drrs.dp = intel_dp;
5442
5443 unlock:
5444 mutex_unlock(&dev_priv->drrs.mutex);
5445 }
5446
5447 /**
5448 * intel_edp_drrs_disable - Disable DRRS
5449 * @intel_dp: DP struct
5451 */
5452 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5453 {
5454 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5455 struct drm_i915_private *dev_priv = dev->dev_private;
5456 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5457 struct drm_crtc *crtc = dig_port->base.base.crtc;
5458 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5459
5460 if (!intel_crtc->config->has_drrs)
5461 return;
5462
5463 mutex_lock(&dev_priv->drrs.mutex);
5464 if (!dev_priv->drrs.dp) {
5465 mutex_unlock(&dev_priv->drrs.mutex);
5466 return;
5467 }
5468
5469 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5470 intel_dp_set_drrs_state(dev_priv->dev,
5471 intel_dp->attached_connector->panel.
5472 fixed_mode->vrefresh);
5473
5474 dev_priv->drrs.dp = NULL;
5475 mutex_unlock(&dev_priv->drrs.mutex);
5476
5477 cancel_delayed_work_sync(&dev_priv->drrs.work);
5478 }
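
/*
 * Pairing sketch (illustrative only): DRRS follows the encoder
 * enable/disable lifecycle, so a modeset path brackets the pipe with:
 *
 *	intel_edp_drrs_enable(intel_dp);	(after the pipe is up)
 *	...
 *	intel_edp_drrs_disable(intel_dp);	(before the pipe goes down)
 *
 * Note that intel_edp_drrs_disable() first restores the fixed (high) RR
 * if the panel was downclocked, and cancels any pending downclock work.
 */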
5479
5480 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5481 {
5482 struct drm_i915_private *dev_priv =
5483 container_of(work, typeof(*dev_priv), drrs.work.work);
5484 struct intel_dp *intel_dp;
5485
5486 mutex_lock(&dev_priv->drrs.mutex);
5487
5488 intel_dp = dev_priv->drrs.dp;
5489
5490 if (!intel_dp)
5491 goto unlock;
5492
5493 /*
5494 * The delayed work can race with an invalidate hence we need to
5495 * recheck.
5496 */
5497
5498 if (dev_priv->drrs.busy_frontbuffer_bits)
5499 goto unlock;
5500
5501 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5502 intel_dp_set_drrs_state(dev_priv->dev,
5503 intel_dp->attached_connector->panel.
5504 downclock_mode->vrefresh);
5505
5506 unlock:
5507 mutex_unlock(&dev_priv->drrs.mutex);
5508 }
5509
5510 /**
5511 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5512 * @dev: DRM device
5513 * @frontbuffer_bits: frontbuffer plane tracking bits
5514 *
5515 * This function gets called every time rendering on the given planes
5516 * starts. Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5517 *
5518 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5519 */
5520 void intel_edp_drrs_invalidate(struct drm_device *dev,
5521 unsigned frontbuffer_bits)
5522 {
5523 struct drm_i915_private *dev_priv = dev->dev_private;
5524 struct drm_crtc *crtc;
5525 enum pipe pipe;
5526
5527 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5528 return;
5529
5530 cancel_delayed_work(&dev_priv->drrs.work);
5531
5532 mutex_lock(&dev_priv->drrs.mutex);
5533 if (!dev_priv->drrs.dp) {
5534 mutex_unlock(&dev_priv->drrs.mutex);
5535 return;
5536 }
5537
5538 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5539 pipe = to_intel_crtc(crtc)->pipe;
5540
5541 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5542 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5543
5544 /* invalidate means busy screen hence upclock */
5545 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5546 intel_dp_set_drrs_state(dev_priv->dev,
5547 dev_priv->drrs.dp->attached_connector->panel.
5548 fixed_mode->vrefresh);
5549
5550 mutex_unlock(&dev_priv->drrs.mutex);
5551 }
5552
5553 /**
5554 * intel_edp_drrs_flush - Restart Idleness DRRS
5555 * @dev: DRM device
5556 * @frontbuffer_bits: frontbuffer plane tracking bits
5557 *
5558 * This function gets called every time rendering on the given planes has
5559 * completed, or a flip on a crtc has completed. So DRRS should be upclocked
5560 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
5561 * other planes are dirty.
5562 *
5563 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5564 */
5565 void intel_edp_drrs_flush(struct drm_device *dev,
5566 unsigned frontbuffer_bits)
5567 {
5568 struct drm_i915_private *dev_priv = dev->dev_private;
5569 struct drm_crtc *crtc;
5570 enum pipe pipe;
5571
5572 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5573 return;
5574
5575 cancel_delayed_work(&dev_priv->drrs.work);
5576
5577 mutex_lock(&dev_priv->drrs.mutex);
5578 if (!dev_priv->drrs.dp) {
5579 mutex_unlock(&dev_priv->drrs.mutex);
5580 return;
5581 }
5582
5583 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5584 pipe = to_intel_crtc(crtc)->pipe;
5585
5586 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5587 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5588
5589 /* flush means busy screen hence upclock */
5590 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5591 intel_dp_set_drrs_state(dev_priv->dev,
5592 dev_priv->drrs.dp->attached_connector->panel.
5593 fixed_mode->vrefresh);
5594
5595 /*
5596 * flush also means no more activity hence schedule downclock, if all
5597 * other fbs are quiescent too
5598 */
5599 if (!dev_priv->drrs.busy_frontbuffer_bits)
5600 schedule_delayed_work(&dev_priv->drrs.work,
5601 msecs_to_jiffies(1000));
5602 mutex_unlock(&dev_priv->drrs.mutex);
5603 }
5604
5605 /**
5606 * DOC: Display Refresh Rate Switching (DRRS)
5607 *
5608 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5609 * which enables switching dynamically between low and high refresh
5610 * rates based on the usage scenario. This feature is applicable to
5611 * internal panels.
5612 *
5613 * Indication that the panel supports DRRS is given by the panel EDID, which
5614 * would list multiple refresh rates for one resolution.
5615 *
5616 * DRRS is of two types: static and seamless.
5617 * Static DRRS involves changing the refresh rate (RR) via a full modeset
5618 * (which may appear as a blink on screen) and is used in dock/undock scenarios.
5619 * Seamless DRRS involves changing RR without any visual effect to the user
5620 * and can be used during normal system usage. This is done by programming
5621 * certain registers.
5622 *
5623 * Support for static/seamless DRRS may be indicated in the VBT based on
5624 * inputs from the panel spec.
5625 *
5626 * DRRS saves power by switching to low RR based on usage scenarios.
5627 *
5628 * eDP DRRS:
5629 * The implementation is based on the frontbuffer tracking infrastructure.
5630 * When there is a disturbance on the screen triggered by user activity or a
5631 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5632 * When there is no movement on screen, after a timeout of 1 second, a switch
5633 * to low RR is made.
5634 * For integration with frontbuffer tracking code,
5635 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5636 *
5637 * DRRS can be further extended to support other internal panels and also
5638 * the scenario of video playback wherein RR is set based on the rate
5639 * requested by userspace.
5640 */
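
/*
 * Integration sketch (hypothetical wrapper names, for illustration): the
 * frontbuffer tracking code forwards dirty and flush events to DRRS with
 * 'frontbuffer_bits' identifying the planes that were touched:
 *
 *	static void frontbuffer_dirty(struct drm_device *dev, unsigned bits)
 *	{
 *		intel_edp_drrs_invalidate(dev, bits);	(rendering starts: upclock)
 *	}
 *
 *	static void frontbuffer_flushed(struct drm_device *dev, unsigned bits)
 *	{
 *		intel_edp_drrs_flush(dev, bits);	(rendering done: arm downclock)
 *	}
 *
 * The real hooks live in the i915 frontbuffer tracking code.
 */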
5641
5642 /**
5643 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5644 * @intel_connector: eDP connector
5645 * @fixed_mode: preferred mode of panel
5646 *
5647 * This function is called only once at driver load to initialize the
5648 * basic DRRS state.
5649 *
5650 * Returns:
5651 * Downclock mode if the panel supports it, else NULL.
5652 * DRRS support is determined by the presence of a downclock mode (apart
5653 * from the VBT setting).
5654 */
5655 static struct drm_display_mode *
5656 intel_dp_drrs_init(struct intel_connector *intel_connector,
5657 struct drm_display_mode *fixed_mode)
5658 {
5659 struct drm_connector *connector = &intel_connector->base;
5660 struct drm_device *dev = connector->dev;
5661 struct drm_i915_private *dev_priv = dev->dev_private;
5662 struct drm_display_mode *downclock_mode = NULL;
5663
5664 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5665 mutex_init(&dev_priv->drrs.mutex);
5666
5667 if (INTEL_INFO(dev)->gen <= 6) {
5668 DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
5669 return NULL;
5670 }
5671
5672 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5673 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5674 return NULL;
5675 }
5676
5677 downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5678 connector);
5679
5680 if (!downclock_mode) {
5681 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
5682 return NULL;
5683 }
5684
5685 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5686
5687 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5688 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5689 return downclock_mode;
5690 }
5691
5692 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5693 struct intel_connector *intel_connector)
5694 {
5695 struct drm_connector *connector = &intel_connector->base;
5696 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5697 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5698 struct drm_device *dev = intel_encoder->base.dev;
5699 struct drm_i915_private *dev_priv = dev->dev_private;
5700 struct drm_display_mode *fixed_mode = NULL;
5701 struct drm_display_mode *downclock_mode = NULL;
5702 bool has_dpcd;
5703 struct drm_display_mode *scan;
5704 struct edid *edid;
5705 enum pipe pipe = INVALID_PIPE;
5706
5707 if (!is_edp(intel_dp))
5708 return true;
5709
5710 pps_lock(intel_dp);
5711 intel_edp_panel_vdd_sanitize(intel_dp);
5712 pps_unlock(intel_dp);
5713
5714 /* Cache DPCD and EDID for eDP. */
5715 has_dpcd = intel_dp_get_dpcd(intel_dp);
5716
5717 if (has_dpcd) {
5718 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5719 dev_priv->no_aux_handshake =
5720 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5721 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5722 } else {
5723 /* if this fails, presume the device is a ghost */
5724 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5725 return false;
5726 }
5727
5728 /* We now know it's not a ghost, init power sequence regs. */
5729 pps_lock(intel_dp);
5730 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5731 pps_unlock(intel_dp);
5732
5733 mutex_lock(&dev->mode_config.mutex);
5734 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5735 if (edid) {
5736 if (drm_add_edid_modes(connector, edid)) {
5737 drm_mode_connector_update_edid_property(connector,
5738 edid);
5739 drm_edid_to_eld(connector, edid);
5740 } else {
5741 kfree(edid);
5742 edid = ERR_PTR(-EINVAL);
5743 }
5744 } else {
5745 edid = ERR_PTR(-ENOENT);
5746 }
5747 intel_connector->edid = edid;
5748
5749 /* prefer fixed mode from EDID if available */
5750 list_for_each_entry(scan, &connector->probed_modes, head) {
5751 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5752 fixed_mode = drm_mode_duplicate(dev, scan);
5753 downclock_mode = intel_dp_drrs_init(
5754 intel_connector, fixed_mode);
5755 break;
5756 }
5757 }
5758
5759 /* fallback to VBT if available for eDP */
5760 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5761 fixed_mode = drm_mode_duplicate(dev,
5762 dev_priv->vbt.lfp_lvds_vbt_mode);
5763 if (fixed_mode)
5764 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5765 }
5766 mutex_unlock(&dev->mode_config.mutex);
5767
5768 if (IS_VALLEYVIEW(dev)) {
5769 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5770 register_reboot_notifier(&intel_dp->edp_notifier);
5771
5772 /*
5773 * Figure out the current pipe for the initial backlight setup.
5774 * If the current pipe isn't valid, try the PPS pipe, and if that
5775 * fails just assume pipe A.
5776 */
5777 if (IS_CHERRYVIEW(dev))
5778 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5779 else
5780 pipe = PORT_TO_PIPE(intel_dp->DP);
5781
5782 if (pipe != PIPE_A && pipe != PIPE_B)
5783 pipe = intel_dp->pps_pipe;
5784
5785 if (pipe != PIPE_A && pipe != PIPE_B)
5786 pipe = PIPE_A;
5787
5788 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5789 pipe_name(pipe));
5790 }
5791
5792 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5793 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5794 intel_panel_setup_backlight(connector, pipe);
5795
5796 return true;
5797 }
5798
5799 bool
5800 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5801 struct intel_connector *intel_connector)
5802 {
5803 struct drm_connector *connector = &intel_connector->base;
5804 struct intel_dp *intel_dp = &intel_dig_port->dp;
5805 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5806 struct drm_device *dev = intel_encoder->base.dev;
5807 struct drm_i915_private *dev_priv = dev->dev_private;
5808 enum port port = intel_dig_port->port;
5809 int type, ret;
5810
5811 intel_dp->pps_pipe = INVALID_PIPE;
5812
5813 /* intel_dp vfuncs */
5814 if (INTEL_INFO(dev)->gen >= 9)
5815 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5816 else if (IS_VALLEYVIEW(dev))
5817 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5818 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5819 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5820 else if (HAS_PCH_SPLIT(dev))
5821 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5822 else
5823 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5824
5825 if (INTEL_INFO(dev)->gen >= 9)
5826 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5827 else
5828 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5829
5830 if (HAS_DDI(dev))
5831 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5832
5833 /* Preserve the current hw state. */
5834 intel_dp->DP = I915_READ(intel_dp->output_reg);
5835 intel_dp->attached_connector = intel_connector;
5836
5837 if (intel_dp_is_edp(dev, port))
5838 type = DRM_MODE_CONNECTOR_eDP;
5839 else
5840 type = DRM_MODE_CONNECTOR_DisplayPort;
5841
5842 /*
5843 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5844 * for DP the encoder type can be set by the caller to
5845 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5846 */
5847 if (type == DRM_MODE_CONNECTOR_eDP)
5848 intel_encoder->type = INTEL_OUTPUT_EDP;
5849
5850 /* eDP only on port B and/or C on vlv/chv */
5851 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5852 port != PORT_B && port != PORT_C))
5853 return false;
5854
5855 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5856 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5857 port_name(port));
5858
5859 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5860 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5861
5862 connector->interlace_allowed = true;
5863 connector->doublescan_allowed = 0;
5864
5865 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5866 edp_panel_vdd_work);
5867
5868 intel_connector_attach_encoder(intel_connector, intel_encoder);
5869 drm_connector_register(connector);
5870
5871 if (HAS_DDI(dev))
5872 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5873 else
5874 intel_connector->get_hw_state = intel_connector_get_hw_state;
5875 intel_connector->unregister = intel_dp_connector_unregister;
5876
5877 /* Set up the hotplug pin. */
5878 switch (port) {
5879 case PORT_A:
5880 intel_encoder->hpd_pin = HPD_PORT_A;
5881 break;
5882 case PORT_B:
5883 intel_encoder->hpd_pin = HPD_PORT_B;
5884 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5885 intel_encoder->hpd_pin = HPD_PORT_A;
5886 break;
5887 case PORT_C:
5888 intel_encoder->hpd_pin = HPD_PORT_C;
5889 break;
5890 case PORT_D:
5891 intel_encoder->hpd_pin = HPD_PORT_D;
5892 break;
5893 case PORT_E:
5894 intel_encoder->hpd_pin = HPD_PORT_E;
5895 break;
5896 default:
5897 BUG();
5898 }
5899
5900 if (is_edp(intel_dp)) {
5901 pps_lock(intel_dp);
5902 intel_dp_init_panel_power_timestamps(intel_dp);
5903 if (IS_VALLEYVIEW(dev))
5904 vlv_initial_power_sequencer_setup(intel_dp);
5905 else
5906 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5907 pps_unlock(intel_dp);
5908 }
5909
5910 ret = intel_dp_aux_init(intel_dp, intel_connector);
5911 if (ret)
5912 goto fail;
5913
5914 /* init MST on ports that can support it */
5915 if (HAS_DP_MST(dev) &&
5916 (port == PORT_B || port == PORT_C || port == PORT_D))
5917 intel_dp_mst_encoder_init(intel_dig_port,
5918 intel_connector->base.base.id);
5919
5920 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5921 intel_dp_aux_fini(intel_dp);
5922 intel_dp_mst_encoder_cleanup(intel_dig_port);
5923 goto fail;
5924 }
5925
5926 intel_dp_add_properties(intel_dp, connector);
5927
5928 /* For G4X desktop chips, PEG_BAND_GAP_DATA bits 3:0 must first be
5929 * written 0xd. Failure to do so will result in spurious interrupts
5930 * being generated on the port when a cable is not attached.
5931 */
5932 if (IS_G4X(dev) && !IS_GM45(dev)) {
5933 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5934 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5935 }
5936
5937 i915_debugfs_connector_add(connector);
5938
5939 return true;
5940
5941 fail:
5942 if (is_edp(intel_dp)) {
5943 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5944 /*
5945 * vdd might still be enabled due to the delayed vdd off.
5946 * Make sure vdd is actually turned off here.
5947 */
5948 pps_lock(intel_dp);
5949 edp_panel_vdd_off_sync(intel_dp);
5950 pps_unlock(intel_dp);
5951 }
5952 drm_connector_unregister(connector);
5953 drm_connector_cleanup(connector);
5954
5955 return false;
5956 }
5957
5958 void
5959 intel_dp_init(struct drm_device *dev,
5960 i915_reg_t output_reg, enum port port)
5961 {
5962 struct drm_i915_private *dev_priv = dev->dev_private;
5963 struct intel_digital_port *intel_dig_port;
5964 struct intel_encoder *intel_encoder;
5965 struct drm_encoder *encoder;
5966 struct intel_connector *intel_connector;
5967
5968 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5969 if (!intel_dig_port)
5970 return;
5971
5972 intel_connector = intel_connector_alloc();
5973 if (!intel_connector)
5974 goto err_connector_alloc;
5975
5976 intel_encoder = &intel_dig_port->base;
5977 encoder = &intel_encoder->base;
5978
5979 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5980 DRM_MODE_ENCODER_TMDS, NULL);
5981
5982 intel_encoder->compute_config = intel_dp_compute_config;
5983 intel_encoder->disable = intel_disable_dp;
5984 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5985 intel_encoder->get_config = intel_dp_get_config;
5986 intel_encoder->suspend = intel_dp_encoder_suspend;
5987 if (IS_CHERRYVIEW(dev)) {
5988 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5989 intel_encoder->pre_enable = chv_pre_enable_dp;
5990 intel_encoder->enable = vlv_enable_dp;
5991 intel_encoder->post_disable = chv_post_disable_dp;
5992 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5993 } else if (IS_VALLEYVIEW(dev)) {
5994 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5995 intel_encoder->pre_enable = vlv_pre_enable_dp;
5996 intel_encoder->enable = vlv_enable_dp;
5997 intel_encoder->post_disable = vlv_post_disable_dp;
5998 } else {
5999 intel_encoder->pre_enable = g4x_pre_enable_dp;
6000 intel_encoder->enable = g4x_enable_dp;
6001 if (INTEL_INFO(dev)->gen >= 5)
6002 intel_encoder->post_disable = ilk_post_disable_dp;
6003 }
6004
6005 intel_dig_port->port = port;
6006 intel_dig_port->dp.output_reg = output_reg;
6007
6008 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6009 if (IS_CHERRYVIEW(dev)) {
6010 if (port == PORT_D)
6011 intel_encoder->crtc_mask = 1 << 2;
6012 else
6013 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6014 } else {
6015 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6016 }
6017 intel_encoder->cloneable = 0;
6018
6019 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6020 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6021
6022 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6023 goto err_init_connector;
6024
6025 return;
6026
6027 err_init_connector:
6028 drm_encoder_cleanup(encoder);
6029 kfree(intel_connector);
6030 err_connector_alloc:
6031 kfree(intel_dig_port);
6032
6033 return;
6034 }
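
/*
 * Call-site sketch: platform display setup registers a DP port by passing
 * its control register and port, e.g. (register/port pair assumed here):
 *
 *	intel_dp_init(dev, DP_B, PORT_B);
 *
 * The actual call sites live in the platform output setup code.
 */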
6035
6036 void intel_dp_mst_suspend(struct drm_device *dev)
6037 {
6038 struct drm_i915_private *dev_priv = dev->dev_private;
6039 int i;
6040
6041 /* disable MST */
6042 for (i = 0; i < I915_MAX_PORTS; i++) {
6043 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6044 if (!intel_dig_port)
6045 continue;
6046
6047 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6048 if (!intel_dig_port->dp.can_mst)
6049 continue;
6050 if (intel_dig_port->dp.is_mst)
6051 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6052 }
6053 }
6054 }
6055
6056 void intel_dp_mst_resume(struct drm_device *dev)
6057 {
6058 struct drm_i915_private *dev_priv = dev->dev_private;
6059 int i;
6060
6061 for (i = 0; i < I915_MAX_PORTS; i++) {
6062 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6063 if (!intel_dig_port)
6064 continue;
6065 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6066 int ret;
6067
6068 if (!intel_dig_port->dp.can_mst)
6069 continue;
6070
6071 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6072 if (ret != 0)
6073 intel_dp_check_mst_status(&intel_dig_port->dp);
6075 }
6076 }
6077 }