Commit | Line | Data |
---|---|---|
9c065a7d DV |
1 | /* |
2 | * Copyright © 2012-2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eugeni Dodonov <eugeni.dodonov@intel.com> | |
25 | * Daniel Vetter <daniel.vetter@ffwll.ch> | |
26 | * | |
27 | */ | |
28 | ||
29 | #include <linux/pm_runtime.h> | |
30 | #include <linux/vgaarb.h> | |
31 | ||
32 | #include "i915_drv.h" | |
33 | #include "intel_drv.h" | |
34 | #include <drm/i915_powerwell.h> | |
35 | ||
/*
 * Cached pointer to the HSW/BDW power domains state.
 * NOTE(review): presumably consumed by the external power-well API declared
 * in <drm/i915_powerwell.h> (included above) — confirm against its callers.
 */
static struct i915_power_domains *hsw_pwr;
37 | ||
/*
 * Iterate over the power wells in @power_domains, in array order, executing
 * the statement that follows only for wells whose ->domains mask intersects
 * @domain_mask.  The trailing if is deliberately part of the macro so the
 * filter applies to the loop body.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

/*
 * Same as for_each_power_well(), but walking the array in reverse.
 * NOTE(review): presumably so wells are taken down in the reverse of the
 * order they were brought up — confirm against the power_wells[] ordering.
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);  \
	     i--)							   \
		if ((power_well)->domains & (domain_mask))
50 | ||
51 | /** | |
52 | * We should only use the power well if we explicitly asked the hardware to | |
53 | * enable it, so check if it's enabled and also check if we've requested it to | |
54 | * be enabled. | |
55 | */ | |
56 | static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, | |
57 | struct i915_power_well *power_well) | |
58 | { | |
59 | return I915_READ(HSW_PWR_WELL_DRIVER) == | |
60 | (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); | |
61 | } | |
62 | ||
63 | bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv, | |
64 | enum intel_display_power_domain domain) | |
65 | { | |
66 | struct i915_power_domains *power_domains; | |
67 | struct i915_power_well *power_well; | |
68 | bool is_enabled; | |
69 | int i; | |
70 | ||
71 | if (dev_priv->pm.suspended) | |
72 | return false; | |
73 | ||
74 | power_domains = &dev_priv->power_domains; | |
75 | ||
76 | is_enabled = true; | |
77 | ||
78 | for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { | |
79 | if (power_well->always_on) | |
80 | continue; | |
81 | ||
82 | if (!power_well->hw_enabled) { | |
83 | is_enabled = false; | |
84 | break; | |
85 | } | |
86 | } | |
87 | ||
88 | return is_enabled; | |
89 | } | |
90 | ||
91 | bool intel_display_power_enabled(struct drm_i915_private *dev_priv, | |
92 | enum intel_display_power_domain domain) | |
93 | { | |
94 | struct i915_power_domains *power_domains; | |
95 | bool ret; | |
96 | ||
97 | power_domains = &dev_priv->power_domains; | |
98 | ||
99 | mutex_lock(&power_domains->lock); | |
100 | ret = intel_display_power_enabled_unlocked(dev_priv, domain); | |
101 | mutex_unlock(&power_domains->lock); | |
102 | ||
103 | return ret; | |
104 | } | |
105 | ||
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register here,
	 * so that vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/*
	 * NOTE(review): presumably re-initializes interrupt registers that
	 * lost their state along with the well — confirm in
	 * gen8_irq_power_well_post_enable().
	 */
	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
		gen8_irq_power_well_post_enable(dev_priv);
}
133 | ||
/*
 * Drive the HSW/BDW display power well to the requested state via the
 * driver's request register, polling for the hardware to acknowledge an
 * enable.  Disabling only drops our request; the hardware powers the well
 * down once no agent is requesting it (see the comment above
 * hsw_power_well_post_enable()).
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	/* Current hw state: is the well up, and had we already requested it? */
	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			/* 20ms for the hardware to report the well as up */
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		/*
		 * NOTE(review): the post-enable fixups (VGA workaround, irq
		 * restore) run even when the well was already up, e.g. when
		 * syncing against BIOS state.  Re-running them appears
		 * harmless, but confirm this is intentional rather than
		 * meant to sit inside the !is_enabled branch above.
		 */
		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
165 | ||
/*
 * Bring the hardware state of the HSW power well in line with the
 * software reference count at init/resume time.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
178 | ||
/* ->enable hook for the HSW display power well. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

/* ->disable hook for the HSW display power well. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
190 | ||
/*
 * No-op hook for always-on wells: there is no hardware control to exercise,
 * so enable/disable/sync_hw do nothing.
 */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
201 | ||
202 | static void vlv_set_power_well(struct drm_i915_private *dev_priv, | |
203 | struct i915_power_well *power_well, bool enable) | |
204 | { | |
205 | enum punit_power_well power_well_id = power_well->data; | |
206 | u32 mask; | |
207 | u32 state; | |
208 | u32 ctrl; | |
209 | ||
210 | mask = PUNIT_PWRGT_MASK(power_well_id); | |
211 | state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : | |
212 | PUNIT_PWRGT_PWR_GATE(power_well_id); | |
213 | ||
214 | mutex_lock(&dev_priv->rps.hw_lock); | |
215 | ||
216 | #define COND \ | |
217 | ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) | |
218 | ||
219 | if (COND) | |
220 | goto out; | |
221 | ||
222 | ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); | |
223 | ctrl &= ~mask; | |
224 | ctrl |= state; | |
225 | vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); | |
226 | ||
227 | if (wait_for(COND, 100)) | |
228 | DRM_ERROR("timout setting power well state %08x (%08x)\n", | |
229 | state, | |
230 | vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); | |
231 | ||
232 | #undef COND | |
233 | ||
234 | out: | |
235 | mutex_unlock(&dev_priv->rps.hw_lock); | |
236 | } | |
237 | ||
/* Sync a VLV Punit well's hardware state to the software refcount. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
243 | ||
/* ->enable hook for plain VLV Punit power wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

/* ->disable hook for plain VLV Punit power wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
255 | ||
/*
 * Query the hardware state of a VLV Punit power well, sanity-checking that
 * both the status and control registers hold one of the two states we ever
 * program (on or gated).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
291 | ||
/*
 * Enable the VLV display (DISP2D) well, then re-enable display interrupts
 * and, outside of init/resume, re-init hotplug state and VGA, both of which
 * were lost while the well was down.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}
314 | ||
/*
 * Disable the VLV display (DISP2D) well.  Display interrupts are masked
 * first, since the interrupt registers live in the well, and the panel
 * power sequencer state is reset afterwards as it is lost with the well.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
328 | ||
/*
 * Power up the VLV DPIO common (BC) well.  The ordering here is mandated
 * by hardware: the CRI clock must be running before the well comes up,
 * and common-lane reset is deasserted only afterwards.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
358 | ||
/*
 * Power down the VLV DPIO common (BC) well.  All PLLs must already be
 * disabled (asserted below), then common-lane reset is asserted before
 * the well itself is gated.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
374 | ||
/*
 * Power up a CHV DPIO common well (either the BC well driving PHY0 or the
 * D well driving PHY1): enable the CRI clock for the PHY's pipe(s), power
 * the well, poll for the PHY power-good signal, then deassert common-lane
 * reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}
409 | ||
/*
 * Power down a CHV DPIO common well: verify the PLL(s) on the PHY's
 * pipe(s) are already off, assert common-lane reset, then gate the well.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
432 | ||
/*
 * Query the hardware state of a CHV per-pipe power well from the Punit
 * DSPFREQ register, cross-checking status (SSS) against control (SSC)
 * bits — which, per the WARN below, mirror each other shifted by 16.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
461 | ||
462 | static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, | |
463 | struct i915_power_well *power_well, | |
464 | bool enable) | |
465 | { | |
466 | enum pipe pipe = power_well->data; | |
467 | u32 state; | |
468 | u32 ctrl; | |
469 | ||
470 | state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); | |
471 | ||
472 | mutex_lock(&dev_priv->rps.hw_lock); | |
473 | ||
474 | #define COND \ | |
475 | ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) | |
476 | ||
477 | if (COND) | |
478 | goto out; | |
479 | ||
480 | ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | |
481 | ctrl &= ~DP_SSC_MASK(pipe); | |
482 | ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); | |
483 | vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); | |
484 | ||
485 | if (wait_for(COND, 100)) | |
486 | DRM_ERROR("timout setting power well state %08x (%08x)\n", | |
487 | state, | |
488 | vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); | |
489 | ||
490 | #undef COND | |
491 | ||
492 | out: | |
493 | mutex_unlock(&dev_priv->rps.hw_lock); | |
494 | } | |
495 | ||
/* Sync a CHV pipe well's hardware state to the software refcount. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
501 | ||
/* ->enable hook for CHV per-pipe wells; ->data must name a valid pipe. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);
}

/* ->disable hook for CHV per-pipe wells; ->data must name a valid pipe. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
521 | ||
522 | static void check_power_well_state(struct drm_i915_private *dev_priv, | |
523 | struct i915_power_well *power_well) | |
524 | { | |
525 | bool enabled = power_well->ops->is_enabled(dev_priv, power_well); | |
526 | ||
527 | if (power_well->always_on || !i915.disable_power_well) { | |
528 | if (!enabled) | |
529 | goto mismatch; | |
530 | ||
531 | return; | |
532 | } | |
533 | ||
534 | if (enabled != (power_well->count > 0)) | |
535 | goto mismatch; | |
536 | ||
537 | return; | |
538 | ||
539 | mismatch: | |
540 | WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", | |
541 | power_well->name, power_well->always_on, enabled, | |
542 | power_well->count, i915.disable_power_well); | |
543 | } | |
544 | ||
/*
 * Grab a reference on @domain, enabling (in forward order) every power
 * well backing it whose use count goes 0 -> 1.  Also takes a runtime PM
 * reference so the device stays awake while the domain is held.  Paired
 * with intel_display_power_put().
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	/* Must come first: the device must be awake before touching wells. */
	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
572 | ||
/*
 * Drop a reference on @domain, disabling (in reverse order) every backing
 * well whose use count hits zero — but only when the disable_power_well
 * module option allows powering wells down.  Releases the runtime PM
 * reference taken by intel_display_power_get().
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Catch unbalanced put()s. */
	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
603 | ||
/* Bitmask covering every defined power domain. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/*
 * Per-platform domain masks.  Each *_ALWAYS_ON_* mask lists the domains
 * served by hardware that is never power gated; the corresponding display
 * well then covers everything else.  POWER_DOMAIN_INIT is included in each
 * gated-well mask so that init-time code can grab all of them at once.
 */
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* BDW additionally keeps the pipe A panel fitter always on. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* On VLV the display well covers every domain. */
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

/* Per-TX-lane-pair DPIO well masks (VLV). */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

/* CHV has per-pipe power wells. */
#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
692 | ||
/* Ops for wells that cannot be gated: all control hooks are no-ops. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for CHV per-pipe wells, driven via the Punit DSPFREQ register. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for CHV DPIO common wells; state query is shared with VLV. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
713 | ||
/* Platforms without power wells get a single always-on well for everything. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

/* Ops for the HSW/BDW display power well. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
729 | ||
/* HSW: one always-on well plus the single gateable display well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW, with the BDW domain masks. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
757 | ||
/* Ops for the VLV DISP2D well: Punit control plus irq/hpd/VGA side effects. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common well: CRI clock / cmnreset sequencing. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for plain VLV Punit wells (DPIO TX lanes) with no extra sequencing. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
778 | ||
/*
 * VLV power well list.  Array order matters: wells are enabled front to
 * back and disabled back to front, so the display well precedes the TX
 * lane wells and the DPIO common well comes last.
 *
 * NOTE(review): every TX lane well claims the union of all B/C lane
 * domains — presumably so the lanes are powered as a group; confirm this
 * is intentional rather than a copy-paste of the same mask.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
835 | ||
/*
 * CHV power well table. Wells are enabled in increasing index order and
 * disabled in reverse, so "always-on" comes first. Several wells are
 * compiled out with #if 0 — only the always-on well and the two DPIO
 * common-lane wells are currently used on CHV.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "pipe-a",
		.domains = CHV_PIPE_A_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
936 | ||
937 | static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, | |
938 | enum punit_power_well power_well_id) | |
939 | { | |
940 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | |
941 | struct i915_power_well *power_well; | |
942 | int i; | |
943 | ||
944 | for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { | |
945 | if (power_well->data == power_well_id) | |
946 | return power_well; | |
947 | } | |
948 | ||
949 | return NULL; | |
950 | } | |
951 | ||
/*
 * Install a platform's static power well table into @power_domains.
 * Must be a macro (not a function) so that ARRAY_SIZE() still sees the
 * real array type of __power_wells rather than a decayed pointer.
 */
#define set_power_wells(power_domains, __power_wells) ({ \
	(power_domains)->power_wells = (__power_wells); \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
956 | ||
957 | int intel_power_domains_init(struct drm_i915_private *dev_priv) | |
958 | { | |
959 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | |
960 | ||
961 | mutex_init(&power_domains->lock); | |
962 | ||
963 | /* | |
964 | * The enabling order will be from lower to higher indexed wells, | |
965 | * the disabling order is reversed. | |
966 | */ | |
967 | if (IS_HASWELL(dev_priv->dev)) { | |
968 | set_power_wells(power_domains, hsw_power_wells); | |
969 | hsw_pwr = power_domains; | |
970 | } else if (IS_BROADWELL(dev_priv->dev)) { | |
971 | set_power_wells(power_domains, bdw_power_wells); | |
972 | hsw_pwr = power_domains; | |
973 | } else if (IS_CHERRYVIEW(dev_priv->dev)) { | |
974 | set_power_wells(power_domains, chv_power_wells); | |
975 | } else if (IS_VALLEYVIEW(dev_priv->dev)) { | |
976 | set_power_wells(power_domains, vlv_power_wells); | |
977 | } else { | |
978 | set_power_wells(power_domains, i9xx_always_on_power_well); | |
979 | } | |
980 | ||
981 | return 0; | |
982 | } | |
983 | ||
/*
 * intel_power_domains_remove - undo intel_power_domains_init()
 * @dev_priv: i915 device instance
 *
 * The well tables are static data, so the only teardown needed is to
 * drop the audio driver's hsw_pwr handle; the exported i915_*_power_well
 * helpers below then return -ENODEV instead of touching a dead device.
 */
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}
988 | ||
989 | static void intel_power_domains_resume(struct drm_i915_private *dev_priv) | |
990 | { | |
991 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | |
992 | struct i915_power_well *power_well; | |
993 | int i; | |
994 | ||
995 | mutex_lock(&power_domains->lock); | |
996 | for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { | |
997 | power_well->ops->sync_hw(dev_priv, power_well); | |
998 | power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, | |
999 | power_well); | |
1000 | } | |
1001 | mutex_unlock(&power_domains->lock); | |
1002 | } | |
1003 | ||
/*
 * VLV display PHY common lane workaround: if the common lane well was
 * left enabled (e.g. by the BIOS) without the display being fully up,
 * toggle the PHY sideband reset by power gating the common lane well.
 * See the docx reference in the comment below for the rationale.
 *
 * NOTE(review): assumes both lookups succeed — on VLV the DPIO_CMN_BC
 * and DISP2D wells are always present in vlv_power_wells[], so the NULL
 * case is not checked here.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
1034 | ||
/*
 * intel_power_domains_init_hw - bring power domain HW to a known state
 * @dev_priv: i915 device instance
 *
 * Applies the VLV common-lane workaround (before any wells are enabled,
 * and only on VLV proper, not CHV), then forces the init power domain on
 * and resyncs every well's software state with the hardware. The
 * 'initializing' flag is set around the sequence so well ops can detect
 * init-time calls.
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* CHV has its own PHY handling; the cmnlane toggle is VLV-only. */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
1053 | ||
/*
 * intel_aux_display_runtime_get - grab a runtime PM reference for AUX use
 * @dev_priv: i915 device instance
 *
 * Thin wrapper around intel_runtime_pm_get(); kept as a separate entry
 * point for AUX/display callers.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}
1058 | ||
/*
 * intel_aux_display_runtime_put - drop the reference taken by
 * intel_aux_display_runtime_get().
 * @dev_priv: i915 device instance
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
1063 | ||
1064 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv) | |
1065 | { | |
1066 | struct drm_device *dev = dev_priv->dev; | |
1067 | struct device *device = &dev->pdev->dev; | |
1068 | ||
1069 | if (!HAS_RUNTIME_PM(dev)) | |
1070 | return; | |
1071 | ||
1072 | pm_runtime_get_sync(device); | |
1073 | WARN(dev_priv->pm.suspended, "Device still suspended.\n"); | |
1074 | } | |
1075 | ||
1076 | void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) | |
1077 | { | |
1078 | struct drm_device *dev = dev_priv->dev; | |
1079 | struct device *device = &dev->pdev->dev; | |
1080 | ||
1081 | if (!HAS_RUNTIME_PM(dev)) | |
1082 | return; | |
1083 | ||
1084 | WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n"); | |
1085 | pm_runtime_get_noresume(device); | |
1086 | } | |
1087 | ||
1088 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv) | |
1089 | { | |
1090 | struct drm_device *dev = dev_priv->dev; | |
1091 | struct device *device = &dev->pdev->dev; | |
1092 | ||
1093 | if (!HAS_RUNTIME_PM(dev)) | |
1094 | return; | |
1095 | ||
1096 | pm_runtime_mark_last_busy(device); | |
1097 | pm_runtime_put_autosuspend(device); | |
1098 | } | |
1099 | ||
1100 | void intel_init_runtime_pm(struct drm_i915_private *dev_priv) | |
1101 | { | |
1102 | struct drm_device *dev = dev_priv->dev; | |
1103 | struct device *device = &dev->pdev->dev; | |
1104 | ||
1105 | if (!HAS_RUNTIME_PM(dev)) | |
1106 | return; | |
1107 | ||
1108 | pm_runtime_set_active(device); | |
1109 | ||
1110 | /* | |
1111 | * RPM depends on RC6 to save restore the GT HW context, so make RC6 a | |
1112 | * requirement. | |
1113 | */ | |
1114 | if (!intel_enable_rc6(dev)) { | |
1115 | DRM_INFO("RC6 disabled, disabling runtime PM support\n"); | |
1116 | return; | |
1117 | } | |
1118 | ||
1119 | pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ | |
1120 | pm_runtime_mark_last_busy(device); | |
1121 | pm_runtime_use_autosuspend(device); | |
1122 | ||
1123 | pm_runtime_put_autosuspend(device); | |
1124 | } | |
1125 | ||
1126 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) | |
1127 | { | |
1128 | struct drm_device *dev = dev_priv->dev; | |
1129 | struct device *device = &dev->pdev->dev; | |
1130 | ||
1131 | if (!HAS_RUNTIME_PM(dev)) | |
1132 | return; | |
1133 | ||
1134 | if (!intel_enable_rc6(dev)) | |
1135 | return; | |
1136 | ||
1137 | /* Make sure we're not suspended first. */ | |
1138 | pm_runtime_get_sync(device); | |
1139 | pm_runtime_disable(device); | |
1140 | } | |
1141 | ||
1142 | /* Display audio driver power well request */ | |
1143 | int i915_request_power_well(void) | |
1144 | { | |
1145 | struct drm_i915_private *dev_priv; | |
1146 | ||
1147 | if (!hsw_pwr) | |
1148 | return -ENODEV; | |
1149 | ||
1150 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, | |
1151 | power_domains); | |
1152 | intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); | |
1153 | return 0; | |
1154 | } | |
1155 | EXPORT_SYMBOL_GPL(i915_request_power_well); | |
1156 | ||
1157 | /* Display audio driver power well release */ | |
1158 | int i915_release_power_well(void) | |
1159 | { | |
1160 | struct drm_i915_private *dev_priv; | |
1161 | ||
1162 | if (!hsw_pwr) | |
1163 | return -ENODEV; | |
1164 | ||
1165 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, | |
1166 | power_domains); | |
1167 | intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); | |
1168 | return 0; | |
1169 | } | |
1170 | EXPORT_SYMBOL_GPL(i915_release_power_well); | |
1171 | ||
1172 | /* | |
1173 | * Private interface for the audio driver to get CDCLK in kHz. | |
1174 | * | |
1175 | * Caller must request power well using i915_request_power_well() prior to | |
1176 | * making the call. | |
1177 | */ | |
1178 | int i915_get_cdclk_freq(void) | |
1179 | { | |
1180 | struct drm_i915_private *dev_priv; | |
1181 | ||
1182 | if (!hsw_pwr) | |
1183 | return -ENODEV; | |
1184 | ||
1185 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, | |
1186 | power_domains); | |
1187 | ||
1188 | return intel_ddi_get_cdclk_freq(dev_priv); | |
1189 | } | |
1190 | EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); |