drm/i915/bxt: Define BXT power domains
drivers/gpu/drm/i915/intel_runtime_pm.c
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
 34
35/**
36 * DOC: runtime pm
37 *
38 * The i915 driver supports dynamic enabling and disabling of entire hardware
39 * blocks at runtime. This is especially important on the display side where
40 * software is supposed to control many power gates manually on recent hardware,
41 * since on the GT side a lot of the power management is done by the hardware.
42 * But even there some manual control at the device level is required.
43 *
44 * Since i915 supports a diverse set of platforms with a unified codebase and
45 * hardware engineers just love to shuffle functionality around between power
46 * domains there's a sizeable amount of indirection required. This file provides
47 * generic functions to the driver for grabbing and releasing references for
48 * abstract power domains. It then maps those to the actual power wells
49 * present for a given platform.
50 */
51
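/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * caller pairs a get with a put around the hardware access it needs, e.g.
 * for the VGA block:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	... program VGA registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 *
 * Which power well actually backs POWER_DOMAIN_VGA depends on the platform
 * tables defined further down in this file.
 */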
52#define for_each_power_well(i, power_well, domain_mask, power_domains) \
53 for (i = 0; \
54 i < (power_domains)->power_well_count && \
55 ((power_well) = &(power_domains)->power_wells[i]); \
56 i++) \
57 if ((power_well)->domains & (domain_mask))
58
59#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
60 for (i = (power_domains)->power_well_count - 1; \
61 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
62 i--) \
63 if ((power_well)->domains & (domain_mask))
64
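/*
 * Illustrative sketch (editor's addition, mirroring how these macros are
 * used later in this file): iterate over every power well that services a
 * given domain, in enable order:
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		power_well->ops->enable(dev_priv, power_well);
 *
 * for_each_power_well_rev() walks the same wells in reverse, which is the
 * order used when dropping references.
 */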
 65/*
66 * We should only use the power well if we explicitly asked the hardware to
67 * enable it, so check if it's enabled and also check if we've requested it to
68 * be enabled.
69 */
70static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
71 struct i915_power_well *power_well)
72{
73 return I915_READ(HSW_PWR_WELL_DRIVER) ==
74 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
75}
76
77/**
78 * __intel_display_power_is_enabled - unlocked check for a power domain
79 * @dev_priv: i915 device instance
80 * @domain: power domain to check
81 *
82 * This is the unlocked version of intel_display_power_is_enabled() and should
83 * only be used from error capture and recovery code where deadlocks are
84 * possible.
85 *
86 * Returns:
87 * True when the power domain is enabled, false otherwise.
88 */
89bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
90 enum intel_display_power_domain domain)
91{
92 struct i915_power_domains *power_domains;
93 struct i915_power_well *power_well;
94 bool is_enabled;
95 int i;
96
97 if (dev_priv->pm.suspended)
98 return false;
99
100 power_domains = &dev_priv->power_domains;
101
102 is_enabled = true;
103
104 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
105 if (power_well->always_on)
106 continue;
107
108 if (!power_well->hw_enabled) {
109 is_enabled = false;
110 break;
111 }
112 }
113
114 return is_enabled;
115}
116
 117/**
 118 * intel_display_power_is_enabled - check for a power domain
119 * @dev_priv: i915 device instance
120 * @domain: power domain to check
121 *
122 * This function can be used to check the hw power domain state. It is mostly
123 * used in hardware state readout functions. Everywhere else code should rely
124 * upon explicit power domain reference counting to ensure that the hardware
125 * block is powered up before accessing it.
126 *
127 * Callers must hold the relevant modesetting locks to ensure that concurrent
128 * threads can't disable the power well while the caller tries to read a few
129 * registers.
130 *
131 * Returns:
132 * True when the power domain is enabled, false otherwise.
133 */
134bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
135 enum intel_display_power_domain domain)
136{
137 struct i915_power_domains *power_domains;
138 bool ret;
139
140 power_domains = &dev_priv->power_domains;
141
142 mutex_lock(&power_domains->lock);
 143 ret = __intel_display_power_is_enabled(dev_priv, domain);
144 mutex_unlock(&power_domains->lock);
145
146 return ret;
147}
148
149/**
150 * intel_display_set_init_power - set the initial power domain state
151 * @dev_priv: i915 device instance
152 * @enable: whether to enable or disable the initial power domain state
153 *
154 * For simplicity our driver load/unload and system suspend/resume code assumes
 155 * that all power domains are always enabled. This function controls the state
 156 * of this little hack. While the initial power domain state is enabled, runtime
 157 * pm is effectively disabled.
158 */
159void intel_display_set_init_power(struct drm_i915_private *dev_priv,
160 bool enable)
161{
162 if (dev_priv->power_domains.init_power_on == enable)
163 return;
164
165 if (enable)
166 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
167 else
168 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
169
170 dev_priv->power_domains.init_power_on = enable;
171}
172
173/*
174 * Starting with Haswell, we have a "Power Down Well" that can be turned off
175 * when not needed anymore. We have 4 registers that can request the power well
176 * to be enabled, and it will only be disabled if none of the registers is
177 * requesting it to be enabled.
178 */
179static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
180{
181 struct drm_device *dev = dev_priv->dev;
182
183 /*
184 * After we re-enable the power well, if we touch VGA register 0x3d5
185 * we'll get unclaimed register interrupts. This stops after we write
186 * anything to the VGA MSR register. The vgacon module uses this
187 * register all the time, so if we unbind our driver and, as a
188 * consequence, bind vgacon, we'll get stuck in an infinite loop at
 189 * console_unlock(). So here we touch the VGA MSR register, making
190 * sure vgacon can keep working normally without triggering interrupts
191 * and error messages.
192 */
193 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
194 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
195 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
196
 197 if (IS_BROADWELL(dev))
198 gen8_irq_power_well_post_enable(dev_priv,
199 1 << PIPE_C | 1 << PIPE_B);
200}
201
202static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
203 struct i915_power_well *power_well)
204{
205 struct drm_device *dev = dev_priv->dev;
206
207 /*
208 * After we re-enable the power well, if we touch VGA register 0x3d5
209 * we'll get unclaimed register interrupts. This stops after we write
210 * anything to the VGA MSR register. The vgacon module uses this
211 * register all the time, so if we unbind our driver and, as a
212 * consequence, bind vgacon, we'll get stuck in an infinite loop at
 213 * console_unlock(). So here we touch the VGA MSR register, making
214 * sure vgacon can keep working normally without triggering interrupts
215 * and error messages.
216 */
217 if (power_well->data == SKL_DISP_PW_2) {
218 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
219 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
220 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
221
222 gen8_irq_power_well_post_enable(dev_priv,
223 1 << PIPE_C | 1 << PIPE_B);
224 }
225
226 if (power_well->data == SKL_DISP_PW_1) {
227 intel_prepare_ddi(dev);
 228 gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
 229 }
230}
231
232static void hsw_set_power_well(struct drm_i915_private *dev_priv,
233 struct i915_power_well *power_well, bool enable)
234{
235 bool is_enabled, enable_requested;
236 uint32_t tmp;
237
238 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
239 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
240 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
241
242 if (enable) {
243 if (!enable_requested)
244 I915_WRITE(HSW_PWR_WELL_DRIVER,
245 HSW_PWR_WELL_ENABLE_REQUEST);
246
247 if (!is_enabled) {
248 DRM_DEBUG_KMS("Enabling power well\n");
249 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
250 HSW_PWR_WELL_STATE_ENABLED), 20))
251 DRM_ERROR("Timeout enabling power well\n");
 252 hsw_power_well_post_enable(dev_priv);
253 }
254
255 } else {
256 if (enable_requested) {
257 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
258 POSTING_READ(HSW_PWR_WELL_DRIVER);
259 DRM_DEBUG_KMS("Requesting to disable the power well\n");
260 }
261 }
262}
263
264#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
265 BIT(POWER_DOMAIN_TRANSCODER_A) | \
266 BIT(POWER_DOMAIN_PIPE_B) | \
267 BIT(POWER_DOMAIN_TRANSCODER_B) | \
268 BIT(POWER_DOMAIN_PIPE_C) | \
269 BIT(POWER_DOMAIN_TRANSCODER_C) | \
270 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
271 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
272 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
273 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
274 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
275 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
276 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
277 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
278 BIT(POWER_DOMAIN_AUX_B) | \
279 BIT(POWER_DOMAIN_AUX_C) | \
280 BIT(POWER_DOMAIN_AUX_D) | \
281 BIT(POWER_DOMAIN_AUDIO) | \
282 BIT(POWER_DOMAIN_VGA) | \
283 BIT(POWER_DOMAIN_INIT))
284#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
285 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
286 BIT(POWER_DOMAIN_PLLS) | \
287 BIT(POWER_DOMAIN_PIPE_A) | \
288 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
289 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
290 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
291 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
292 BIT(POWER_DOMAIN_AUX_A) | \
293 BIT(POWER_DOMAIN_INIT))
294#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
295 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
296 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
297 BIT(POWER_DOMAIN_INIT))
298#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
299 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
300 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
301 BIT(POWER_DOMAIN_INIT))
302#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
303 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
304 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
305 BIT(POWER_DOMAIN_INIT))
306#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
307 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
308 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
309 BIT(POWER_DOMAIN_INIT))
310#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
311 SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
312#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
313 (POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
314 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
315 SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
316 SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
317 SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
318 SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
319 SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
320 BIT(POWER_DOMAIN_INIT))
321
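/*
 * Illustrative sketch (editor's addition): a power well services a domain
 * when that domain's bit is set in its mask, so a check such as
 *
 *	if (SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS & BIT(POWER_DOMAIN_AUDIO))
 *		... power well 2 must be up before touching audio ...
 *
 * is effectively how for_each_power_well() filters wells for a requested
 * domain. The "always on" mask is simply the complement of all the well
 * masks listed above.
 */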
322#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
323 BIT(POWER_DOMAIN_TRANSCODER_A) | \
324 BIT(POWER_DOMAIN_PIPE_B) | \
325 BIT(POWER_DOMAIN_TRANSCODER_B) | \
326 BIT(POWER_DOMAIN_PIPE_C) | \
327 BIT(POWER_DOMAIN_TRANSCODER_C) | \
328 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
329 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
330 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
331 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
332 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
333 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
334 BIT(POWER_DOMAIN_AUX_B) | \
335 BIT(POWER_DOMAIN_AUX_C) | \
336 BIT(POWER_DOMAIN_AUDIO) | \
337 BIT(POWER_DOMAIN_VGA) | \
338 BIT(POWER_DOMAIN_INIT))
339#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
340 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
341 BIT(POWER_DOMAIN_PIPE_A) | \
342 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
343 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
344 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
345 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
346 BIT(POWER_DOMAIN_AUX_A) | \
347 BIT(POWER_DOMAIN_PLLS) | \
348 BIT(POWER_DOMAIN_INIT))
349#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
350 (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
351 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
352 BIT(POWER_DOMAIN_INIT))
353
354static void skl_set_power_well(struct drm_i915_private *dev_priv,
355 struct i915_power_well *power_well, bool enable)
356{
357 uint32_t tmp, fuse_status;
358 uint32_t req_mask, state_mask;
 359 bool is_enabled, enable_requested, check_fuse_status = false;
360
361 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
362 fuse_status = I915_READ(SKL_FUSE_STATUS);
363
364 switch (power_well->data) {
365 case SKL_DISP_PW_1:
366 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
367 SKL_FUSE_PG0_DIST_STATUS), 1)) {
368 DRM_ERROR("PG0 not enabled\n");
369 return;
370 }
371 break;
372 case SKL_DISP_PW_2:
373 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
374 DRM_ERROR("PG1 in disabled state\n");
375 return;
376 }
377 break;
378 case SKL_DISP_PW_DDI_A_E:
379 case SKL_DISP_PW_DDI_B:
380 case SKL_DISP_PW_DDI_C:
381 case SKL_DISP_PW_DDI_D:
382 case SKL_DISP_PW_MISC_IO:
383 break;
384 default:
385 WARN(1, "Unknown power well %lu\n", power_well->data);
386 return;
387 }
388
389 req_mask = SKL_POWER_WELL_REQ(power_well->data);
 390 enable_requested = tmp & req_mask;
 391 state_mask = SKL_POWER_WELL_STATE(power_well->data);
 392 is_enabled = tmp & state_mask;
393
394 if (enable) {
 395 if (!enable_requested) {
 396 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
397 }
398
 399 if (!is_enabled) {
 400 DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
401 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
402 state_mask), 1))
403 DRM_ERROR("%s enable timeout\n",
404 power_well->name);
405 check_fuse_status = true;
406 }
407 } else {
 408 if (enable_requested) {
409 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
410 POSTING_READ(HSW_PWR_WELL_DRIVER);
411 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
412 }
413 }
414
415 if (check_fuse_status) {
416 if (power_well->data == SKL_DISP_PW_1) {
417 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
418 SKL_FUSE_PG1_DIST_STATUS), 1))
419 DRM_ERROR("PG1 distributing status timeout\n");
420 } else if (power_well->data == SKL_DISP_PW_2) {
421 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
422 SKL_FUSE_PG2_DIST_STATUS), 1))
423 DRM_ERROR("PG2 distributing status timeout\n");
424 }
425 }
426
427 if (enable && !is_enabled)
428 skl_power_well_post_enable(dev_priv, power_well);
429}
430
431static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
432 struct i915_power_well *power_well)
433{
434 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
435
436 /*
437 * We're taking over the BIOS, so clear any requests made by it since
438 * the driver is in charge now.
439 */
440 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
441 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
442}
443
444static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
445 struct i915_power_well *power_well)
446{
447 hsw_set_power_well(dev_priv, power_well, true);
448}
449
450static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
451 struct i915_power_well *power_well)
452{
453 hsw_set_power_well(dev_priv, power_well, false);
454}
455
456static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
457 struct i915_power_well *power_well)
458{
459 uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
460 SKL_POWER_WELL_STATE(power_well->data);
461
462 return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
463}
464
465static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
466 struct i915_power_well *power_well)
467{
468 skl_set_power_well(dev_priv, power_well, power_well->count > 0);
469
470 /* Clear any request made by BIOS as driver is taking over */
471 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
472}
473
474static void skl_power_well_enable(struct drm_i915_private *dev_priv,
475 struct i915_power_well *power_well)
476{
477 skl_set_power_well(dev_priv, power_well, true);
478}
479
480static void skl_power_well_disable(struct drm_i915_private *dev_priv,
481 struct i915_power_well *power_well)
482{
483 skl_set_power_well(dev_priv, power_well, false);
484}
485
486static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
487 struct i915_power_well *power_well)
488{
489}
490
491static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
492 struct i915_power_well *power_well)
493{
494 return true;
495}
496
497static void vlv_set_power_well(struct drm_i915_private *dev_priv,
498 struct i915_power_well *power_well, bool enable)
499{
500 enum punit_power_well power_well_id = power_well->data;
501 u32 mask;
502 u32 state;
503 u32 ctrl;
504
505 mask = PUNIT_PWRGT_MASK(power_well_id);
506 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
507 PUNIT_PWRGT_PWR_GATE(power_well_id);
508
509 mutex_lock(&dev_priv->rps.hw_lock);
510
511#define COND \
512 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
513
514 if (COND)
515 goto out;
516
517 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
518 ctrl &= ~mask;
519 ctrl |= state;
520 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
521
522 if (wait_for(COND, 100))
523 DRM_ERROR("timout setting power well state %08x (%08x)\n",
524 state,
525 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
526
527#undef COND
528
529out:
530 mutex_unlock(&dev_priv->rps.hw_lock);
531}
532
533static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
534 struct i915_power_well *power_well)
535{
536 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
537}
538
539static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
540 struct i915_power_well *power_well)
541{
542 vlv_set_power_well(dev_priv, power_well, true);
543}
544
545static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
546 struct i915_power_well *power_well)
547{
548 vlv_set_power_well(dev_priv, power_well, false);
549}
550
551static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
552 struct i915_power_well *power_well)
553{
554 int power_well_id = power_well->data;
555 bool enabled = false;
556 u32 mask;
557 u32 state;
558 u32 ctrl;
559
560 mask = PUNIT_PWRGT_MASK(power_well_id);
561 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
562
563 mutex_lock(&dev_priv->rps.hw_lock);
564
565 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
566 /*
567 * We only ever set the power-on and power-gate states, anything
568 * else is unexpected.
569 */
570 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
571 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
572 if (state == ctrl)
573 enabled = true;
574
575 /*
576 * A transient state at this point would mean some unexpected party
577 * is poking at the power controls too.
578 */
579 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
580 WARN_ON(ctrl != state);
581
582 mutex_unlock(&dev_priv->rps.hw_lock);
583
584 return enabled;
585}
586
587static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
588 struct i915_power_well *power_well)
589{
590 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
591
592 vlv_set_power_well(dev_priv, power_well, true);
593
594 spin_lock_irq(&dev_priv->irq_lock);
595 valleyview_enable_display_irqs(dev_priv);
596 spin_unlock_irq(&dev_priv->irq_lock);
597
598 /*
599 * During driver initialization/resume we can avoid restoring the
 600 * part of the HW/SW state that will be initialized explicitly anyway.
601 */
602 if (dev_priv->power_domains.initializing)
603 return;
604
 605 intel_hpd_init(dev_priv);
606
607 i915_redisable_vga_power_on(dev_priv->dev);
608}
609
610static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
611 struct i915_power_well *power_well)
612{
613 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
614
615 spin_lock_irq(&dev_priv->irq_lock);
616 valleyview_disable_display_irqs(dev_priv);
617 spin_unlock_irq(&dev_priv->irq_lock);
618
619 vlv_set_power_well(dev_priv, power_well, false);
620
621 vlv_power_sequencer_reset(dev_priv);
622}
623
624static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
625 struct i915_power_well *power_well)
626{
627 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
628
629 /*
630 * Enable the CRI clock source so we can get at the
631 * display and the reference clock for VGA
632 * hotplug / manual detection.
633 */
634 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
635 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
636 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
637
638 vlv_set_power_well(dev_priv, power_well, true);
639
640 /*
641 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
642 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
643 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
644 * b. The other bits such as sfr settings / modesel may all
645 * be set to 0.
646 *
647 * This should only be done on init and resume from S3 with
648 * both PLLs disabled, or we risk losing DPIO and PLL
649 * synchronization.
650 */
651 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
652}
653
654static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
655 struct i915_power_well *power_well)
656{
657 enum pipe pipe;
658
659 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
660
661 for_each_pipe(dev_priv, pipe)
662 assert_pll_disabled(dev_priv, pipe);
663
664 /* Assert common reset */
665 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
666
667 vlv_set_power_well(dev_priv, power_well, false);
668}
669
670static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
671 struct i915_power_well *power_well)
672{
673 enum dpio_phy phy;
674
675 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
676 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
677
678 /*
679 * Enable the CRI clock source so we can get at the
680 * display and the reference clock for VGA
681 * hotplug / manual detection.
682 */
683 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
684 phy = DPIO_PHY0;
685 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
686 DPLL_REFA_CLK_ENABLE_VLV);
687 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
688 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
689 } else {
690 phy = DPIO_PHY1;
691 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
692 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
693 }
694 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
695 vlv_set_power_well(dev_priv, power_well, true);
696
697 /* Poll for phypwrgood signal */
698 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
699 DRM_ERROR("Display PHY %d is not power up\n", phy);
700
701 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
702 PHY_COM_LANE_RESET_DEASSERT(phy));
703}
704
705static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
706 struct i915_power_well *power_well)
707{
708 enum dpio_phy phy;
709
710 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
711 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
712
713 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
714 phy = DPIO_PHY0;
715 assert_pll_disabled(dev_priv, PIPE_A);
716 assert_pll_disabled(dev_priv, PIPE_B);
717 } else {
718 phy = DPIO_PHY1;
719 assert_pll_disabled(dev_priv, PIPE_C);
720 }
721
722 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
723 ~PHY_COM_LANE_RESET_DEASSERT(phy));
724
725 vlv_set_power_well(dev_priv, power_well, false);
726}
727
728static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
729 struct i915_power_well *power_well)
730{
731 enum pipe pipe = power_well->data;
732 bool enabled;
733 u32 state, ctrl;
734
735 mutex_lock(&dev_priv->rps.hw_lock);
736
737 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
738 /*
739 * We only ever set the power-on and power-gate states, anything
740 * else is unexpected.
741 */
742 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
743 enabled = state == DP_SSS_PWR_ON(pipe);
744
745 /*
746 * A transient state at this point would mean some unexpected party
747 * is poking at the power controls too.
748 */
749 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
750 WARN_ON(ctrl << 16 != state);
751
752 mutex_unlock(&dev_priv->rps.hw_lock);
753
754 return enabled;
755}
756
757static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
758 struct i915_power_well *power_well,
759 bool enable)
760{
761 enum pipe pipe = power_well->data;
762 u32 state;
763 u32 ctrl;
764
765 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
766
767 mutex_lock(&dev_priv->rps.hw_lock);
768
769#define COND \
770 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
771
772 if (COND)
773 goto out;
774
775 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
776 ctrl &= ~DP_SSC_MASK(pipe);
777 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
778 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
779
780 if (wait_for(COND, 100))
781 DRM_ERROR("timout setting power well state %08x (%08x)\n",
782 state,
783 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
784
785#undef COND
786
787out:
788 mutex_unlock(&dev_priv->rps.hw_lock);
789}
790
791static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
792 struct i915_power_well *power_well)
793{
794 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
795}
796
797static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
798 struct i915_power_well *power_well)
799{
800 WARN_ON_ONCE(power_well->data != PIPE_A &&
801 power_well->data != PIPE_B &&
802 power_well->data != PIPE_C);
803
804 chv_set_pipe_power_well(dev_priv, power_well, true);
805
806 if (power_well->data == PIPE_A) {
807 spin_lock_irq(&dev_priv->irq_lock);
808 valleyview_enable_display_irqs(dev_priv);
809 spin_unlock_irq(&dev_priv->irq_lock);
810
811 /*
812 * During driver initialization/resume we can avoid restoring the
 813 * part of the HW/SW state that will be initialized explicitly anyway.
814 */
815 if (dev_priv->power_domains.initializing)
816 return;
817
818 intel_hpd_init(dev_priv);
819
820 i915_redisable_vga_power_on(dev_priv->dev);
821 }
822}
823
824static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
825 struct i915_power_well *power_well)
826{
827 WARN_ON_ONCE(power_well->data != PIPE_A &&
828 power_well->data != PIPE_B &&
829 power_well->data != PIPE_C);
830
831 if (power_well->data == PIPE_A) {
832 spin_lock_irq(&dev_priv->irq_lock);
833 valleyview_disable_display_irqs(dev_priv);
834 spin_unlock_irq(&dev_priv->irq_lock);
835 }
836
 837 chv_set_pipe_power_well(dev_priv, power_well, false);
838
839 if (power_well->data == PIPE_A)
840 vlv_power_sequencer_reset(dev_priv);
841}
842
843/**
844 * intel_display_power_get - grab a power domain reference
845 * @dev_priv: i915 device instance
846 * @domain: power domain to reference
847 *
848 * This function grabs a power domain reference for @domain and ensures that the
849 * power domain and all its parents are powered up. Therefore users should only
850 * grab a reference to the innermost power domain they need.
851 *
852 * Any power domain reference obtained by this function must have a symmetric
853 * call to intel_display_power_put() to release the reference again.
854 */
855void intel_display_power_get(struct drm_i915_private *dev_priv,
856 enum intel_display_power_domain domain)
857{
858 struct i915_power_domains *power_domains;
859 struct i915_power_well *power_well;
860 int i;
861
862 intel_runtime_pm_get(dev_priv);
863
864 power_domains = &dev_priv->power_domains;
865
866 mutex_lock(&power_domains->lock);
867
868 for_each_power_well(i, power_well, BIT(domain), power_domains) {
869 if (!power_well->count++) {
870 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
871 power_well->ops->enable(dev_priv, power_well);
872 power_well->hw_enabled = true;
873 }
874 }
875
876 power_domains->domain_use_count[domain]++;
877
878 mutex_unlock(&power_domains->lock);
879}
880
881/**
882 * intel_display_power_put - release a power domain reference
883 * @dev_priv: i915 device instance
884 * @domain: power domain to reference
885 *
886 * This function drops the power domain reference obtained by
887 * intel_display_power_get() and might power down the corresponding hardware
888 * block right away if this is the last reference.
889 */
890void intel_display_power_put(struct drm_i915_private *dev_priv,
891 enum intel_display_power_domain domain)
892{
893 struct i915_power_domains *power_domains;
894 struct i915_power_well *power_well;
895 int i;
896
897 power_domains = &dev_priv->power_domains;
898
899 mutex_lock(&power_domains->lock);
900
901 WARN_ON(!power_domains->domain_use_count[domain]);
902 power_domains->domain_use_count[domain]--;
903
904 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
905 WARN_ON(!power_well->count);
906
907 if (!--power_well->count && i915.disable_power_well) {
908 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
909 power_well->hw_enabled = false;
910 power_well->ops->disable(dev_priv, power_well);
911 }
912 }
913
914 mutex_unlock(&power_domains->lock);
915
916 intel_runtime_pm_put(dev_priv);
917}
918
919#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
920
921#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
922 BIT(POWER_DOMAIN_PIPE_A) | \
923 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
924 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
925 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
926 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
927 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
928 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
929 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
930 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
931 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
932 BIT(POWER_DOMAIN_PORT_CRT) | \
933 BIT(POWER_DOMAIN_PLLS) | \
934 BIT(POWER_DOMAIN_AUX_A) | \
935 BIT(POWER_DOMAIN_AUX_B) | \
936 BIT(POWER_DOMAIN_AUX_C) | \
937 BIT(POWER_DOMAIN_AUX_D) | \
938 BIT(POWER_DOMAIN_INIT))
939#define HSW_DISPLAY_POWER_DOMAINS ( \
940 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
941 BIT(POWER_DOMAIN_INIT))
942
943#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
944 HSW_ALWAYS_ON_POWER_DOMAINS | \
945 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
946#define BDW_DISPLAY_POWER_DOMAINS ( \
947 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
948 BIT(POWER_DOMAIN_INIT))
949
950#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
951#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
952
953#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
954 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
955 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
956 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
957 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
958 BIT(POWER_DOMAIN_PORT_CRT) | \
959 BIT(POWER_DOMAIN_AUX_B) | \
960 BIT(POWER_DOMAIN_AUX_C) | \
961 BIT(POWER_DOMAIN_INIT))
962
963#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
964 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
965 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
 966 BIT(POWER_DOMAIN_AUX_B) | \
967 BIT(POWER_DOMAIN_INIT))
968
969#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
970 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
 971 BIT(POWER_DOMAIN_AUX_B) | \
972 BIT(POWER_DOMAIN_INIT))
973
974#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
975 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
976 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
 977 BIT(POWER_DOMAIN_AUX_C) | \
978 BIT(POWER_DOMAIN_INIT))
979
980#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
981 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
 982 BIT(POWER_DOMAIN_AUX_C) | \
983 BIT(POWER_DOMAIN_INIT))
984
985#define CHV_PIPE_A_POWER_DOMAINS ( \
986 BIT(POWER_DOMAIN_PIPE_A) | \
987 BIT(POWER_DOMAIN_INIT))
988
989#define CHV_PIPE_B_POWER_DOMAINS ( \
990 BIT(POWER_DOMAIN_PIPE_B) | \
991 BIT(POWER_DOMAIN_INIT))
992
993#define CHV_PIPE_C_POWER_DOMAINS ( \
994 BIT(POWER_DOMAIN_PIPE_C) | \
995 BIT(POWER_DOMAIN_INIT))
996
997#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
998 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
999 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
1000 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
1001 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1002 BIT(POWER_DOMAIN_AUX_B) | \
1003 BIT(POWER_DOMAIN_AUX_C) | \
1004 BIT(POWER_DOMAIN_INIT))
1005
1006#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
1007 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
1008 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
 1009 BIT(POWER_DOMAIN_AUX_D) | \
1010 BIT(POWER_DOMAIN_INIT))
1011
1012#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
1013 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
1014 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
 1015 BIT(POWER_DOMAIN_AUX_D) | \
1016 BIT(POWER_DOMAIN_INIT))
1017
1018#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
1019 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
 1020 BIT(POWER_DOMAIN_AUX_D) | \
1021 BIT(POWER_DOMAIN_INIT))
1022
1023static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1024 .sync_hw = i9xx_always_on_power_well_noop,
1025 .enable = i9xx_always_on_power_well_noop,
1026 .disable = i9xx_always_on_power_well_noop,
1027 .is_enabled = i9xx_always_on_power_well_enabled,
1028};
1029
1030static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1031 .sync_hw = chv_pipe_power_well_sync_hw,
1032 .enable = chv_pipe_power_well_enable,
1033 .disable = chv_pipe_power_well_disable,
1034 .is_enabled = chv_pipe_power_well_enabled,
1035};
1036
1037static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1038 .sync_hw = vlv_power_well_sync_hw,
1039 .enable = chv_dpio_cmn_power_well_enable,
1040 .disable = chv_dpio_cmn_power_well_disable,
1041 .is_enabled = vlv_power_well_enabled,
1042};
1043
1044static struct i915_power_well i9xx_always_on_power_well[] = {
1045 {
1046 .name = "always-on",
1047 .always_on = 1,
1048 .domains = POWER_DOMAIN_MASK,
1049 .ops = &i9xx_always_on_power_well_ops,
1050 },
1051};
1052
1053static const struct i915_power_well_ops hsw_power_well_ops = {
1054 .sync_hw = hsw_power_well_sync_hw,
1055 .enable = hsw_power_well_enable,
1056 .disable = hsw_power_well_disable,
1057 .is_enabled = hsw_power_well_enabled,
1058};
1059
1060static const struct i915_power_well_ops skl_power_well_ops = {
1061 .sync_hw = skl_power_well_sync_hw,
1062 .enable = skl_power_well_enable,
1063 .disable = skl_power_well_disable,
1064 .is_enabled = skl_power_well_enabled,
1065};
1066
1067static struct i915_power_well hsw_power_wells[] = {
1068 {
1069 .name = "always-on",
1070 .always_on = 1,
1071 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
1072 .ops = &i9xx_always_on_power_well_ops,
1073 },
1074 {
1075 .name = "display",
1076 .domains = HSW_DISPLAY_POWER_DOMAINS,
1077 .ops = &hsw_power_well_ops,
1078 },
1079};
1080
1081static struct i915_power_well bdw_power_wells[] = {
1082 {
1083 .name = "always-on",
1084 .always_on = 1,
1085 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
1086 .ops = &i9xx_always_on_power_well_ops,
1087 },
1088 {
1089 .name = "display",
1090 .domains = BDW_DISPLAY_POWER_DOMAINS,
1091 .ops = &hsw_power_well_ops,
1092 },
1093};
1094
1095static const struct i915_power_well_ops vlv_display_power_well_ops = {
1096 .sync_hw = vlv_power_well_sync_hw,
1097 .enable = vlv_display_power_well_enable,
1098 .disable = vlv_display_power_well_disable,
1099 .is_enabled = vlv_power_well_enabled,
1100};
1101
1102static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
1103 .sync_hw = vlv_power_well_sync_hw,
1104 .enable = vlv_dpio_cmn_power_well_enable,
1105 .disable = vlv_dpio_cmn_power_well_disable,
1106 .is_enabled = vlv_power_well_enabled,
1107};
1108
1109static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
1110 .sync_hw = vlv_power_well_sync_hw,
1111 .enable = vlv_power_well_enable,
1112 .disable = vlv_power_well_disable,
1113 .is_enabled = vlv_power_well_enabled,
1114};
1115
1116static struct i915_power_well vlv_power_wells[] = {
1117 {
1118 .name = "always-on",
1119 .always_on = 1,
1120 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1121 .ops = &i9xx_always_on_power_well_ops,
1122 },
1123 {
1124 .name = "display",
1125 .domains = VLV_DISPLAY_POWER_DOMAINS,
1126 .data = PUNIT_POWER_WELL_DISP2D,
1127 .ops = &vlv_display_power_well_ops,
1128 },
1129 {
1130 .name = "dpio-tx-b-01",
1131 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1132 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1133 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1134 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1135 .ops = &vlv_dpio_power_well_ops,
1136 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
1137 },
1138 {
1139 .name = "dpio-tx-b-23",
1140 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1141 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1142 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1143 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1144 .ops = &vlv_dpio_power_well_ops,
1145 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
1146 },
1147 {
1148 .name = "dpio-tx-c-01",
1149 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1150 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1151 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1152 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1153 .ops = &vlv_dpio_power_well_ops,
1154 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
1155 },
1156 {
1157 .name = "dpio-tx-c-23",
1158 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1159 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1160 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1161 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1162 .ops = &vlv_dpio_power_well_ops,
1163 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
1164 },
1165 {
1166 .name = "dpio-common",
1167 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
1168 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
1169 .ops = &vlv_dpio_cmn_power_well_ops,
1170 },
1171};
1172
1173static struct i915_power_well chv_power_wells[] = {
1174 {
1175 .name = "always-on",
1176 .always_on = 1,
1177 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1178 .ops = &i9xx_always_on_power_well_ops,
1179 },
1180#if 0
1181 {
1182 .name = "display",
1183 .domains = VLV_DISPLAY_POWER_DOMAINS,
1184 .data = PUNIT_POWER_WELL_DISP2D,
1185 .ops = &vlv_display_power_well_ops,
1186 },
 1187#endif
1188 {
1189 .name = "pipe-a",
1190 /*
1191 * FIXME: pipe A power well seems to be the new disp2d well.
1192 * At least all registers seem to be housed there. Figure
 1193 * out if this is a temporary situation in pre-production
1194 * hardware or a permanent state of affairs.
1195 */
1196 .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
1197 .data = PIPE_A,
1198 .ops = &chv_pipe_power_well_ops,
1199 },
 1200#if 0
1201 {
1202 .name = "pipe-b",
1203 .domains = CHV_PIPE_B_POWER_DOMAINS,
1204 .data = PIPE_B,
1205 .ops = &chv_pipe_power_well_ops,
1206 },
1207 {
1208 .name = "pipe-c",
1209 .domains = CHV_PIPE_C_POWER_DOMAINS,
1210 .data = PIPE_C,
1211 .ops = &chv_pipe_power_well_ops,
1212 },
1213#endif
1214 {
1215 .name = "dpio-common-bc",
1216 /*
1217 * XXX: cmnreset for one PHY seems to disturb the other.
1218 * As a workaround keep both powered on at the same
1219 * time for now.
1220 */
1221 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
1222 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
1223 .ops = &chv_dpio_cmn_power_well_ops,
1224 },
1225 {
1226 .name = "dpio-common-d",
1227 /*
1228 * XXX: cmnreset for one PHY seems to disturb the other.
1229 * As a workaround keep both powered on at the same
1230 * time for now.
1231 */
1232 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
1233 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
1234 .ops = &chv_dpio_cmn_power_well_ops,
1235 },
1236#if 0
1237 {
1238 .name = "dpio-tx-b-01",
1239 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1240 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
1241 .ops = &vlv_dpio_power_well_ops,
1242 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
1243 },
1244 {
1245 .name = "dpio-tx-b-23",
1246 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1247 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
1248 .ops = &vlv_dpio_power_well_ops,
1249 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
1250 },
1251 {
1252 .name = "dpio-tx-c-01",
1253 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1254 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1255 .ops = &vlv_dpio_power_well_ops,
1256 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
1257 },
1258 {
1259 .name = "dpio-tx-c-23",
1260 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1261 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1262 .ops = &vlv_dpio_power_well_ops,
1263 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
1264 },
1265 {
1266 .name = "dpio-tx-d-01",
1267 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
1268 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
1269 .ops = &vlv_dpio_power_well_ops,
1270 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
1271 },
1272 {
1273 .name = "dpio-tx-d-23",
1274 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
1275 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
1276 .ops = &vlv_dpio_power_well_ops,
1277 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
1278 },
1279#endif
1280};
1281
1282static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1283 enum punit_power_well power_well_id)
1284{
1285 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1286 struct i915_power_well *power_well;
1287 int i;
1288
1289 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1290 if (power_well->data == power_well_id)
1291 return power_well;
1292 }
1293
1294 return NULL;
1295}
1296
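/*
 * Illustrative sketch (editor's addition): lookup_power_well() resolves a
 * punit well id to its table entry, e.g. the VLV cmnlane workaround further
 * below does roughly
 *
 *	struct i915_power_well *cmn =
 *		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
 *
 * and then drives the well directly through cmn->ops.
 */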
1297static struct i915_power_well skl_power_wells[] = {
1298 {
1299 .name = "always-on",
1300 .always_on = 1,
1301 .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1302 .ops = &i9xx_always_on_power_well_ops,
1303 },
1304 {
1305 .name = "power well 1",
1306 .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1307 .ops = &skl_power_well_ops,
1308 .data = SKL_DISP_PW_1,
1309 },
1310 {
1311 .name = "MISC IO power well",
1312 .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
1313 .ops = &skl_power_well_ops,
1314 .data = SKL_DISP_PW_MISC_IO,
1315 },
1316 {
1317 .name = "power well 2",
1318 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1319 .ops = &skl_power_well_ops,
1320 .data = SKL_DISP_PW_2,
1321 },
1322 {
1323 .name = "DDI A/E power well",
1324 .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
1325 .ops = &skl_power_well_ops,
1326 .data = SKL_DISP_PW_DDI_A_E,
1327 },
1328 {
1329 .name = "DDI B power well",
1330 .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
1331 .ops = &skl_power_well_ops,
1332 .data = SKL_DISP_PW_DDI_B,
1333 },
1334 {
1335 .name = "DDI C power well",
1336 .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
1337 .ops = &skl_power_well_ops,
1338 .data = SKL_DISP_PW_DDI_C,
1339 },
1340 {
1341 .name = "DDI D power well",
1342 .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
1343 .ops = &skl_power_well_ops,
1344 .data = SKL_DISP_PW_DDI_D,
1345 },
1346};
1347
1348static struct i915_power_well bxt_power_wells[] = {
1349 {
1350 .name = "always-on",
1351 .always_on = 1,
1352 .domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1353 .ops = &i9xx_always_on_power_well_ops,
1354 },
1355 {
1356 .name = "power well 1",
1357 .domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1358 .ops = &skl_power_well_ops,
1359 .data = SKL_DISP_PW_1,
1360 },
1361 {
1362 .name = "power well 2",
1363 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1364 .ops = &skl_power_well_ops,
1365 .data = SKL_DISP_PW_2,
1366 }
1367};
1368
1369#define set_power_wells(power_domains, __power_wells) ({ \
1370 (power_domains)->power_wells = (__power_wells); \
1371 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
1372})
1373
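/*
 * Illustrative sketch (editor's addition): intel_power_domains_init() below
 * picks the platform table with this helper, e.g.
 *
 *	set_power_wells(power_domains, bxt_power_wells);
 *
 * which points power_domains->power_wells at the static array and sets
 * power_well_count to ARRAY_SIZE(bxt_power_wells).
 */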
1374/**
1375 * intel_power_domains_init - initializes the power domain structures
1376 * @dev_priv: i915 device instance
1377 *
1378 * Initializes the power domain structures for @dev_priv depending upon the
1379 * supported platform.
1380 */
1381int intel_power_domains_init(struct drm_i915_private *dev_priv)
1382{
1383 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1384
1385 mutex_init(&power_domains->lock);
1386
1387 /*
1388 * The enabling order will be from lower to higher indexed wells,
1389 * the disabling order is reversed.
1390 */
1391 if (IS_HASWELL(dev_priv->dev)) {
1392 set_power_wells(power_domains, hsw_power_wells);
1393 } else if (IS_BROADWELL(dev_priv->dev)) {
1394 set_power_wells(power_domains, bdw_power_wells);
1395 } else if (IS_SKYLAKE(dev_priv->dev)) {
1396 set_power_wells(power_domains, skl_power_wells);
1397 } else if (IS_BROXTON(dev_priv->dev)) {
1398 set_power_wells(power_domains, bxt_power_wells);
1399 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1400 set_power_wells(power_domains, chv_power_wells);
1401 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
1402 set_power_wells(power_domains, vlv_power_wells);
1403 } else {
1404 set_power_wells(power_domains, i9xx_always_on_power_well);
1405 }
1406
1407 return 0;
1408}
1409
1410static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1411{
1412 struct drm_device *dev = dev_priv->dev;
1413 struct device *device = &dev->pdev->dev;
1414
1415 if (!HAS_RUNTIME_PM(dev))
1416 return;
1417
1418 if (!intel_enable_rc6(dev))
1419 return;
1420
1421 /* Make sure we're not suspended first. */
1422 pm_runtime_get_sync(device);
1423 pm_runtime_disable(device);
1424}
1425
1426/**
1427 * intel_power_domains_fini - finalizes the power domain structures
1428 * @dev_priv: i915 device instance
1429 *
1430 * Finalizes the power domain structures for @dev_priv depending upon the
1431 * supported platform. This function also disables runtime pm and ensures that
1432 * the device stays powered up so that the driver can be reloaded.
1433 */
 1434void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 1435{
1436 intel_runtime_pm_disable(dev_priv);
1437
1438 /* The i915.ko module is still not prepared to be loaded when
1439 * the power well is not enabled, so just enable it in case
1440 * we're going to unload/reload. */
1441 intel_display_set_init_power(dev_priv, true);
1442}
1443
1444static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1445{
1446 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1447 struct i915_power_well *power_well;
1448 int i;
1449
1450 mutex_lock(&power_domains->lock);
1451 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1452 power_well->ops->sync_hw(dev_priv, power_well);
1453 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1454 power_well);
1455 }
1456 mutex_unlock(&power_domains->lock);
1457}
1458
1459static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
1460{
1461 struct i915_power_well *cmn =
1462 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1463 struct i915_power_well *disp2d =
1464 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
1465
 1466 /* If the display might already be active, skip this */
1467 if (cmn->ops->is_enabled(dev_priv, cmn) &&
1468 disp2d->ops->is_enabled(dev_priv, disp2d) &&
1469 I915_READ(DPIO_CTL) & DPIO_CMNRST)
1470 return;
1471
1472 DRM_DEBUG_KMS("toggling display PHY side reset\n");
1473
1474 /* cmnlane needs DPLL registers */
1475 disp2d->ops->enable(dev_priv, disp2d);
1476
1477 /*
1478 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1479 * Need to assert and de-assert PHY SB reset by gating the
1480 * common lane power, then un-gating it.
1481 * Simply ungating isn't enough to reset the PHY enough to get
1482 * ports and lanes running.
1483 */
1484 cmn->ops->disable(dev_priv, cmn);
1485}
1486
1487/**
1488 * intel_power_domains_init_hw - initialize hardware power domain state
1489 * @dev_priv: i915 device instance
1490 *
1491 * This function initializes the hardware power domain state and enables all
1492 * power domains using intel_display_set_init_power().
1493 */
1494void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
1495{
1496 struct drm_device *dev = dev_priv->dev;
1497 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1498
1499 power_domains->initializing = true;
1500
1501 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1502 mutex_lock(&power_domains->lock);
1503 vlv_cmnlane_wa(dev_priv);
1504 mutex_unlock(&power_domains->lock);
1505 }
1506
1507 /* For now, we need the power well to be always enabled. */
1508 intel_display_set_init_power(dev_priv, true);
1509 intel_power_domains_resume(dev_priv);
1510 power_domains->initializing = false;
1511}
1512
 1513/**
 1514 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
1515 * @dev_priv: i915 device instance
1516 *
1517 * This function grabs a power domain reference for the auxiliary power domain
1518 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
1519 * parents are powered up. Therefore users should only grab a reference to the
1520 * innermost power domain they need.
1521 *
1522 * Any power domain reference obtained by this function must have a symmetric
1523 * call to intel_aux_display_runtime_put() to release the reference again.
1524 */
1525void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
1526{
1527 intel_runtime_pm_get(dev_priv);
1528}
1529
 1530/**
 1531 * intel_aux_display_runtime_put - release an auxiliary power domain reference
1532 * @dev_priv: i915 device instance
1533 *
 1534 * This function drops the auxiliary power domain reference obtained by
1535 * intel_aux_display_runtime_get() and might power down the corresponding
1536 * hardware block right away if this is the last reference.
1537 */
1538void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
1539{
1540 intel_runtime_pm_put(dev_priv);
1541}
1542
1543/**
1544 * intel_runtime_pm_get - grab a runtime pm reference
1545 * @dev_priv: i915 device instance
1546 *
1547 * This function grabs a device-level runtime pm reference (mostly used for GEM
1548 * code to ensure the GTT or GT is on) and ensures that it is powered up.
1549 *
1550 * Any runtime pm reference obtained by this function must have a symmetric
1551 * call to intel_runtime_pm_put() to release the reference again.
1552 */
1553void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1554{
1555 struct drm_device *dev = dev_priv->dev;
1556 struct device *device = &dev->pdev->dev;
1557
1558 if (!HAS_RUNTIME_PM(dev))
1559 return;
1560
1561 pm_runtime_get_sync(device);
1562 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1563}
1564
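/*
 * Illustrative usage sketch (editor's addition, hypothetical caller): a
 * device-level runtime pm reference is held across the hardware access and
 * released with the matching put:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... touch GT/GTT registers ...
 *	intel_runtime_pm_put(dev_priv);
 */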
1565/**
1566 * intel_runtime_pm_get_noresume - grab a runtime pm reference
1567 * @dev_priv: i915 device instance
1568 *
1569 * This function grabs a device-level runtime pm reference (mostly used for GEM
1570 * code to ensure the GTT or GT is on).
1571 *
1572 * It will _not_ power up the device but instead only check that it's powered
 1573 * on. Therefore it is only valid to call this function from contexts where
1574 * the device is known to be powered up and where trying to power it up would
1575 * result in hilarity and deadlocks. That pretty much means only the system
1576 * suspend/resume code where this is used to grab runtime pm references for
1577 * delayed setup down in work items.
1578 *
1579 * Any runtime pm reference obtained by this function must have a symmetric
1580 * call to intel_runtime_pm_put() to release the reference again.
1581 */
1582void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1583{
1584 struct drm_device *dev = dev_priv->dev;
1585 struct device *device = &dev->pdev->dev;
1586
1587 if (!HAS_RUNTIME_PM(dev))
1588 return;
1589
1590 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1591 pm_runtime_get_noresume(device);
1592}
1593
1594/**
1595 * intel_runtime_pm_put - release a runtime pm reference
1596 * @dev_priv: i915 device instance
1597 *
1598 * This function drops the device-level runtime pm reference obtained by
1599 * intel_runtime_pm_get() and might power down the corresponding
1600 * hardware block right away if this is the last reference.
1601 */
1602void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1603{
1604 struct drm_device *dev = dev_priv->dev;
1605 struct device *device = &dev->pdev->dev;
1606
1607 if (!HAS_RUNTIME_PM(dev))
1608 return;
1609
1610 pm_runtime_mark_last_busy(device);
1611 pm_runtime_put_autosuspend(device);
1612}
1613
1614/**
1615 * intel_runtime_pm_enable - enable runtime pm
1616 * @dev_priv: i915 device instance
1617 *
1618 * This function enables runtime pm at the end of the driver load sequence.
1619 *
1620 * Note that this function does currently not enable runtime pm for the
1621 * subordinate display power domains. That is only done on the first modeset
1622 * using intel_display_set_init_power().
1623 */
 1624void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
1625{
1626 struct drm_device *dev = dev_priv->dev;
1627 struct device *device = &dev->pdev->dev;
1628
1629 if (!HAS_RUNTIME_PM(dev))
1630 return;
1631
1632 pm_runtime_set_active(device);
1633
1634 /*
 1635 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
1636 * requirement.
1637 */
1638 if (!intel_enable_rc6(dev)) {
1639 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
1640 return;
1641 }
1642
1643 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
1644 pm_runtime_mark_last_busy(device);
1645 pm_runtime_use_autosuspend(device);
1646
1647 pm_runtime_put_autosuspend(device);
1648}
1649