/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
 * lower standby states when the system is idle but the display is on, as
 * it eliminates display refresh requests to DDR memory completely as long
 * as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
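
/*
 * A rough sketch of the expected call flow, assuming the hooks are wired up
 * from the frontbuffer tracking code (the exact call sites live in the
 * frontbuffer tracking implementation, not in this file):
 *
 *	CPU/GPU rendering dirties a frontbuffer
 *	  -> intel_psr_invalidate()	PSR exits, bits noted as busy
 *	rendering completes and is flushed out to memory
 *	  -> intel_psr_flush()		busy bits cleared, delayed work
 *					scheduled to re-enter PSR
 *	delay elapses with no new invalidate
 *	  -> intel_psr_work()
 *	    -> intel_psr_activate()
 */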
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"
static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
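
/*
 * The VSC SDP that signals PSR state to the sink is sent through the
 * hardware's Video Data Island Packet (DIP) buffers on HSW+, which the
 * helper below fills by hand; VLV/CHV instead auto-generate the packet and
 * only let us pick how often it is sent.
 */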
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before programming the video DIP data
	   buffer registers for the DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	uint32_t val;

	/* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(pipe), val);
}
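
/*
 * The sdp_header bytes below follow the generic DP Secondary Data Packet
 * layout: HB1 is the packet type (0x7 == VSC), HB2 the revision and HB3
 * the number of valid data bytes. Revision 0x3 with 0xb valid bytes
 * signals PSR2 with Selective Update, revision 0x2 with 0x8 valid bytes
 * plain PSR, matching the spec tables cited in each function.
 */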
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6.11 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x3;
	psr_vsc.sdp_header.HB3 = 0xb;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
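
/*
 * aux_msg below is a pre-packed native AUX write of DP_SET_POWER_D0 to
 * DPCD 600h: byte 0 holds the request type, bytes 1-2 the address, byte 3
 * the length minus one and byte 4 the payload. It is loaded into the AUX
 * data registers so the hardware can replay it on PSR exit and wake the
 * sink without software involvement.
 */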
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	uint32_t aux_data_reg, aux_ctl_reg;
	int precharge = 0x3;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				   DP_AUX_FRAME_SYNC_ENABLE);

	aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
				DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
	aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
				DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(aux_data_reg + i,
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	if (INTEL_INFO(dev)->gen >= 9) {
		uint32_t val;

		val = I915_READ(aux_ctl_reg);
		val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
		val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
		val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
		val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
		/* Use hardcoded data values for PSR, frame sync and GTC */
		val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
		val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
		val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
		I915_WRITE(aux_ctl_reg, val);
	} else {
		I915_WRITE(aux_ctl_reg,
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
}
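
/*
 * VLV/CHV step through the PSR state machine largely under software
 * control: PSR_state 0 is disabled, 1 inactive, 2 transition to active
 * (static frame transmission) and 3 active with no RFB updates. The
 * helpers below and intel_psr_exit() drive these transitions explicitly.
 */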
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
	I915_WRITE(VLV_PSRCTL(pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Let's do the transition from PSR_state 1 to PSR_state 2
	 * that is PSR transition to active - static frame transmission.
	 * Then Hardware is responsible for the transition to PSR_state 3
	 * that is PSR active - no Remote Frame Buffer (RFB) update.
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}
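
/*
 * On HSW+ a single register, EDP_PSR_CTL, controls entry: the idle frame
 * count (consecutive unchanged frames before the hardware enters PSR),
 * the maximum sleep time and the TP1/TP2 training pattern times used when
 * the link is brought back up on exit.
 */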
static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	uint32_t max_sleep_time = 0x1f;
	/* It was recently identified that, depending on the panel, the idle
	 * frame count calculated by the HW can be off by 1. So let's use
	 * what came from VBT + 1.
	 * There are also cases where the panel demands at least 4 idle
	 * frames but VBT is not set. To cover both cases let's use at
	 * least 5 when VBT isn't set, to be on the safe side.
	 */
	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
			       dev_priv->vbt.psr.idle_frames + 1 : 5;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		/* It doesn't mean we shouldn't send TPS patterns, so let's
		   send the minimal TP1 possible and skip TP2. */
		val |= EDP_PSR_TP1_TIME_100us;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
		/* Sink should be able to train with the 5 or 6 idle patterns */
	} else
		val |= EDP_PSR_LINK_DISABLE;

	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);

	if (dev_priv->psr.psr2_support)
		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
			   EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
}
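
/*
 * Checks every constraint the source side puts on PSR: the enable_psr
 * module parameter, the port (HSW ties PSR to DDI A), stereo 3D,
 * interlaced modes and link standby support. The verdict is cached in
 * dev_priv->psr.source_ok, which is also reported through debugfs.
 */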
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return false;
	}

	if (IS_HASWELL(dev) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
	}

	if (IS_HASWELL(dev) &&
	    intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
	}

	if (!IS_VALLEYVIEW(dev) && (dev_priv->vbt.psr.full_link ||
				    dig_port->port != PORT_A)) {
		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
		return false;
	}

	dev_priv->psr.source_ok = true;
	return true;
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* Enable/Re-enable PSR on the host */
	if (HAS_DDI(dev))
		/* On HSW+ the source activates PSR on its own as soon as it
		 * matches the configured idle_frame count, so enabling it
		 * here at activation time is all that's needed.
		 */
		hsw_psr_enable_source(intel_dp);
	else
		vlv_psr_activate(intel_dp);

	dev_priv->psr.active = true;
}
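
/*
 * A minimal sketch of how the exported enable/disable pair below is meant
 * to be used from the eDP encoder code (the caller placement here is
 * illustrative, not a promise about the rest of the driver):
 *
 *	encoder enable, after link training:
 *		intel_psr_enable(intel_dp);
 *	encoder disable, before shutting down the pipe:
 *		intel_psr_disable(intel_dp);
 */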
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	if (HAS_DDI(dev)) {
		hsw_psr_setup_vsc(intel_dp);

		if (dev_priv->psr.psr2_support) {
			/* PSR2 is restricted to panel resolutions up to 3200x2000 */
			if (crtc->config->pipe_src_w > 3200 ||
			    crtc->config->pipe_src_h > 2000)
				dev_priv->psr.psr2_support = false;
			else
				skl_psr_setup_su_vsc(intel_dp);
		}

		/* Avoid continuous PSR exit by masking memup and hpd */
		I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD);

		/* Enable PSR on the panel */
		hsw_psr_enable_sink(intel_dp);

		if (INTEL_INFO(dev)->gen >= 9)
			intel_psr_activate(intel_dp);
	} else {
		vlv_psr_setup_vsc(intel_dp);

		/* Enable PSR on the panel */
		vlv_psr_enable_sink(intel_dp);

		/* On HSW+ enable_source also means going to the PSR
		 * entry/active state as soon as idle_frame is achieved, which
		 * here would be too soon. On VLV enable_source merely enables
		 * PSR and leaves it in the inactive state, so it can be done
		 * prior to the active transition, i.e. here.
		 */
		vlv_psr_enable_source(intel_dp);
	}

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void vlv_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
		if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
			      VLV_EDP_PSR_IN_TRANS) == 0, 1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
	}
}
static void hsw_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->psr.active) {
		I915_WRITE(EDP_PSR_CTL(dev),
			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

		/* Wait till PSR is idle */
		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	}
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	if (HAS_DDI(dev))
		hsw_psr_disable(intel_dp);
	else
		vlv_psr_disable(intel_dp);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}
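
/*
 * Re-enabling PSR straight from the flush path isn't possible: the
 * hardware needs time to go fully idle first and, as the DOC comment at
 * the top of the file notes, the locking doesn't allow it either. Hence
 * the re-enable runs from this delayed work.
 */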
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv->dev)) {
		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	} else {
		if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
			      VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
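
/*
 * Force PSR inactive so that subsequent frontbuffer writes actually reach
 * the screen; called with psr.lock held. Re-entry happens later through
 * the delayed work once the busy bits clear.
 */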
static void intel_psr_exit(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev)) {
		val = I915_READ(EDP_PSR_CTL(dev));

		WARN_ON(!(val & EDP_PSR_ENABLE));

		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/* Here we do the transition directly from PSR_state 3 to
		 * PSR_state 5, since PSR_state 4 (active with single frame
		 * update) can be skipped. From PSR_state 5 (PSR exit) the
		 * hardware is responsible for transitioning back to
		 * PSR_state 1 (PSR inactive), the same state as after
		 * vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/* Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}
/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_device *dev,
				   unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a
		 * flip. The bit is self-clearing once we get to the PSR
		 * active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev);

	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;
	int delay_ms = HAS_DDI(dev) ? 100 : 500;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	if (HAS_DDI(dev)) {
		/*
		 * By definition every flush should mean invalidate + flush,
		 * however on core platforms let's minimize the
		 * disable/re-enable so we can avoid the invalidate when flip
		 * originated the flush.
		 */
		if (frontbuffer_bits && origin != ORIGIN_FLIP)
			intel_psr_exit(dev);
	} else {
		/*
		 * On Valleyview and Cherryview we don't use hardware tracking
		 * so any plane updates or cursor moves don't result in a PSR
		 * invalidate. Which means we need to manually fake this in
		 * software for all flushes.
		 */
		if (frontbuffer_bits)
			intel_psr_exit(dev);
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(delay_ms));
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}