/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on eDP
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR specification in eDP 1.3. PSR allows the display to enter lower standby
 * states while the system is idle but the display stays on, since it
 * completely eliminates display refresh requests to DDR memory as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both hardware (source) and
 * panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"
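/*
 * Illustrative sketch (kept out of the build): roughly how the software
 * frontbuffer tracking described in the DOC comment above is expected to
 * drive PSR. The helper name and its arguments are hypothetical; only
 * intel_psr_invalidate() and intel_psr_flush() are real entry points of
 * this file.
 */
#if 0
static void example_frontbuffer_cpu_write(struct drm_device *dev,
					   enum pipe pipe)
{
	/* PSR has to be disabled before the CPU starts dirtying the
	 * frontbuffer, otherwise the panel keeps scanning out its cached
	 * (stale) copy of the frame. */
	intel_psr_invalidate(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	/* ... CPU rendering into the primary plane's frontbuffer ... */

	/* Once the update has landed in memory PSR may be re-enabled; the
	 * actual re-enable happens asynchronously from the psr.work delayed
	 * work scheduled by intel_psr_flush(). */
	intel_psr_flush(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
}
#endif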
static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
bool intel_psr_is_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev))
		return false;

	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before programming the video DIP data
	   buffer registers. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	bool only_standby = false;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	uint32_t idle_frames = 1;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
	bool only_standby = false;

	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return false;
	}

	/* Below limitations aren't valid for Broadwell */
	if (IS_BROADWELL(dev))
		goto out;

	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is enabled\n");
		return false;
	}

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode is enabled\n");
		return false;
	}

 out:
	dev_priv->psr.source_ok = true;
	return true;
}
static void intel_psr_do_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* Enable/Re-enable PSR on the host */
	intel_psr_enable_source(intel_dp);

	dev_priv->psr.active = true;
}
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	intel_psr_setup_vsc(intel_dp);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	/* Enable PSR on the panel */
	intel_psr_enable_sink(intel_dp);

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	if (dev_priv->psr.active) {
		I915_WRITE(EDP_PSR_CTL(dev),
			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

		/* Wait till PSR is idle */
		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	}

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}
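/*
 * Illustrative sketch (kept out of the build) of the ordering required by the
 * two kernel-doc comments above: intel_psr_enable() only once the pipe is
 * fully trained and enabled, intel_psr_disable() before the pipe is shut down
 * again. The encoder hook names here are hypothetical.
 */
#if 0
static void example_encoder_enable(struct intel_dp *intel_dp)
{
	/* ... enable the pipe, train the DP link, light up the panel ... */
	intel_psr_enable(intel_dp);
}

static void example_encoder_disable(struct intel_dp *intel_dp)
{
	intel_psr_disable(intel_dp);
	/* ... only now is it safe to disable the pipe and shut down the
	 * link; intel_psr_disable() has already cancelled psr.work ... */
}
#endif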
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;

	/* We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
		      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_do_enable(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void intel_psr_exit(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->psr.active) {
		u32 val = I915_READ(EDP_PSR_CTL(dev));

		WARN_ON(!(val & EDP_PSR_ENABLE));

		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);

		dev_priv->psr.active = false;
	}
}
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	intel_psr_exit(dev);

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
		     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/*
	 * On Haswell sprite plane updates don't result in a psr invalidating
	 * signal in the hardware, which means we need to manually fake this
	 * in software for all flushes, not just when we've seen a preceding
	 * invalidation through frontbuffer rendering.
	 */
	if (IS_HASWELL(dev) &&
	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
		intel_psr_exit(dev);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}
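/*
 * Illustrative sketch (kept out of the build) of the Haswell sprite case
 * handled above: a sprite plane update does not generate a hardware PSR
 * invalidation, so the update path only calls intel_psr_flush(), which then
 * has to force the PSR exit itself before arming the delayed re-enable.
 * The helper name and its arguments are hypothetical.
 */
#if 0
static void example_sprite_plane_update(struct drm_device *dev, enum pipe pipe)
{
	/* ... update the sprite plane; no intel_psr_invalidate() is done
	 * for this kind of update ... */

	/* On HSW the flush below exits PSR by hand for the sprite bit and,
	 * once no frontbuffer bits are busy, schedules psr.work (100 ms) so
	 * self-refresh resumes when the screen is idle again. */
	intel_psr_flush(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
}
#endif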
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize the basic
 * PSR state: the delayed work used for re-enabling PSR and the PSR lock.
 */
void intel_psr_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}