Commit | Line | Data |
---|---|---|
7ff0ebcc RV |
1 | /* |
2 | * Copyright © 2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | */ | |
23 | ||
94b83957 RV |
24 | /** |
25 | * DOC: Frame Buffer Compression (FBC) | |
26 | * | |
27 | * FBC tries to save memory bandwidth (and so power consumption) by | |
 28 | * compressing the amount of memory used by the display. It is totally | |
 29 | * transparent to user space and completely handled in the kernel. | |
7ff0ebcc RV |
30 | * |
31 | * The benefits of FBC are mostly visible with solid backgrounds and | |
94b83957 RV |
32 | * variation-less patterns. It comes from keeping the memory footprint small |
33 | * and having fewer memory pages opened and accessed for refreshing the display. | |
7ff0ebcc | 34 | * |
94b83957 RV |
35 | * i915 is responsible to reserve stolen memory for FBC and configure its |
36 | * offset on proper registers. The hardware takes care of all | |
37 | * compress/decompress. However there are many known cases where we have to | |
38 | * forcibly disable it to allow proper screen updates. | |
7ff0ebcc RV |
39 | */ |
40 | ||
94b83957 RV |
41 | #include "intel_drv.h" |
42 | #include "i915_drv.h" | |
43 | ||
9f218336 PZ |
44 | static inline bool fbc_supported(struct drm_i915_private *dev_priv) |
45 | { | |
8c40074c | 46 | return HAS_FBC(dev_priv); |
9f218336 PZ |
47 | } |
48 | ||
57105022 PZ |
49 | static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) |
50 | { | |
51 | return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8; | |
52 | } | |
53 | ||
e6cd6dc1 PZ |
54 | static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv) |
55 | { | |
56 | return INTEL_INFO(dev_priv)->gen < 4; | |
57 | } | |
58 | ||
010cf73d PZ |
59 | static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) |
60 | { | |
61 | return INTEL_INFO(dev_priv)->gen <= 3; | |
62 | } | |
63 | ||
2db3366b PZ |
64 | /* |
65 | * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the | |
66 | * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's | |
67 | * origin so the x and y offsets can actually fit the registers. As a | |
68 | * consequence, the fence doesn't really start exactly at the display plane | |
69 | * address we program because it starts at the real start of the buffer, so we | |
70 | * have to take this into consideration here. | |
71 | */ | |
72 | static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc) | |
73 | { | |
74 | return crtc->base.y - crtc->adjusted_y; | |
75 | } | |
76 | ||
c5ecd469 PZ |
77 | /* |
78 | * For SKL+, the plane source size used by the hardware is based on the value we | |
79 | * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value | |
80 | * we wrote to PIPESRC. | |
81 | */ | |
aaf78d27 | 82 | static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, |
c5ecd469 PZ |
83 | int *width, int *height) |
84 | { | |
c5ecd469 PZ |
85 | int w, h; |
86 | ||
aaf78d27 PZ |
87 | if (intel_rotation_90_or_270(cache->plane.rotation)) { |
88 | w = cache->plane.src_h; | |
89 | h = cache->plane.src_w; | |
c5ecd469 | 90 | } else { |
aaf78d27 PZ |
91 | w = cache->plane.src_w; |
92 | h = cache->plane.src_h; | |
c5ecd469 PZ |
93 | } |
94 | ||
95 | if (width) | |
96 | *width = w; | |
97 | if (height) | |
98 | *height = h; | |
99 | } | |
100 | ||
aaf78d27 PZ |
101 | static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, |
102 | struct intel_fbc_state_cache *cache) | |
c5ecd469 | 103 | { |
c5ecd469 PZ |
104 | int lines; |
105 | ||
aaf78d27 | 106 | intel_fbc_get_plane_source_size(cache, NULL, &lines); |
c5ecd469 PZ |
107 | if (INTEL_INFO(dev_priv)->gen >= 7) |
108 | lines = min(lines, 2048); | |
109 | ||
110 | /* Hardware needs the full buffer stride, not just the active area. */ | |
aaf78d27 | 111 | return lines * cache->fb.stride; |
c5ecd469 PZ |
112 | } |
113 | ||
0e631adc | 114 | static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 115 | { |
7ff0ebcc RV |
116 | u32 fbc_ctl; |
117 | ||
7ff0ebcc RV |
118 | /* Disable compression */ |
119 | fbc_ctl = I915_READ(FBC_CONTROL); | |
120 | if ((fbc_ctl & FBC_CTL_EN) == 0) | |
121 | return; | |
122 | ||
123 | fbc_ctl &= ~FBC_CTL_EN; | |
124 | I915_WRITE(FBC_CONTROL, fbc_ctl); | |
125 | ||
126 | /* Wait for compressing bit to clear */ | |
127 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { | |
128 | DRM_DEBUG_KMS("FBC idle timed out\n"); | |
129 | return; | |
130 | } | |
7ff0ebcc RV |
131 | } |
132 | ||
b183b3f1 | 133 | static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 134 | { |
b183b3f1 | 135 | struct intel_fbc_reg_params *params = &dev_priv->fbc.params; |
7ff0ebcc RV |
136 | int cfb_pitch; |
137 | int i; | |
138 | u32 fbc_ctl; | |
139 | ||
60ee5cd2 | 140 | /* Note: fbc.threshold == 1 for i8xx */ |
b183b3f1 PZ |
141 | cfb_pitch = params->cfb_size / FBC_LL_SIZE; |
142 | if (params->fb.stride < cfb_pitch) | |
143 | cfb_pitch = params->fb.stride; | |
7ff0ebcc RV |
144 | |
145 | /* FBC_CTL wants 32B or 64B units */ | |
7733b49b | 146 | if (IS_GEN2(dev_priv)) |
7ff0ebcc RV |
147 | cfb_pitch = (cfb_pitch / 32) - 1; |
148 | else | |
149 | cfb_pitch = (cfb_pitch / 64) - 1; | |
150 | ||
151 | /* Clear old tags */ | |
152 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | |
4d110c71 | 153 | I915_WRITE(FBC_TAG(i), 0); |
7ff0ebcc | 154 | |
7733b49b | 155 | if (IS_GEN4(dev_priv)) { |
7ff0ebcc RV |
156 | u32 fbc_ctl2; |
157 | ||
158 | /* Set it up... */ | |
159 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; | |
b183b3f1 | 160 | fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane); |
7ff0ebcc | 161 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
b183b3f1 | 162 | I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset); |
7ff0ebcc RV |
163 | } |
164 | ||
165 | /* enable it... */ | |
166 | fbc_ctl = I915_READ(FBC_CONTROL); | |
167 | fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; | |
168 | fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; | |
7733b49b | 169 | if (IS_I945GM(dev_priv)) |
7ff0ebcc RV |
170 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
171 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | |
b183b3f1 | 172 | fbc_ctl |= params->fb.fence_reg; |
7ff0ebcc | 173 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
7ff0ebcc RV |
174 | } |
175 | ||
0e631adc | 176 | static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) |
7ff0ebcc | 177 | { |
7ff0ebcc RV |
178 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; |
179 | } | |
180 | ||
b183b3f1 | 181 | static void g4x_fbc_activate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 182 | { |
b183b3f1 | 183 | struct intel_fbc_reg_params *params = &dev_priv->fbc.params; |
7ff0ebcc RV |
184 | u32 dpfc_ctl; |
185 | ||
b183b3f1 PZ |
186 | dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN; |
187 | if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) | |
7ff0ebcc RV |
188 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; |
189 | else | |
190 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | |
b183b3f1 | 191 | dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; |
7ff0ebcc | 192 | |
b183b3f1 | 193 | I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); |
7ff0ebcc RV |
194 | |
195 | /* enable it... */ | |
196 | I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | |
7ff0ebcc RV |
197 | } |
198 | ||
0e631adc | 199 | static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 200 | { |
7ff0ebcc RV |
201 | u32 dpfc_ctl; |
202 | ||
7ff0ebcc RV |
203 | /* Disable compression */ |
204 | dpfc_ctl = I915_READ(DPFC_CONTROL); | |
205 | if (dpfc_ctl & DPFC_CTL_EN) { | |
206 | dpfc_ctl &= ~DPFC_CTL_EN; | |
207 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | |
7ff0ebcc RV |
208 | } |
209 | } | |
210 | ||
0e631adc | 211 | static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) |
7ff0ebcc | 212 | { |
7ff0ebcc RV |
213 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
214 | } | |
215 | ||
d5ce4164 PZ |
216 | /* This function forces a CFB recompression through the nuke operation. */ |
217 | static void intel_fbc_recompress(struct drm_i915_private *dev_priv) | |
7ff0ebcc | 218 | { |
dbef0f15 PZ |
219 | I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE); |
220 | POSTING_READ(MSG_FBC_REND_STATE); | |
7ff0ebcc RV |
221 | } |
222 | ||
b183b3f1 | 223 | static void ilk_fbc_activate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 224 | { |
b183b3f1 | 225 | struct intel_fbc_reg_params *params = &dev_priv->fbc.params; |
7ff0ebcc | 226 | u32 dpfc_ctl; |
ce65e47b | 227 | int threshold = dev_priv->fbc.threshold; |
7ff0ebcc | 228 | |
b183b3f1 PZ |
229 | dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane); |
230 | if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) | |
ce65e47b | 231 | threshold++; |
7ff0ebcc | 232 | |
ce65e47b | 233 | switch (threshold) { |
7ff0ebcc RV |
234 | case 4: |
235 | case 3: | |
236 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | |
237 | break; | |
238 | case 2: | |
239 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | |
240 | break; | |
241 | case 1: | |
242 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | |
243 | break; | |
244 | } | |
245 | dpfc_ctl |= DPFC_CTL_FENCE_EN; | |
7733b49b | 246 | if (IS_GEN5(dev_priv)) |
b183b3f1 | 247 | dpfc_ctl |= params->fb.fence_reg; |
7ff0ebcc | 248 | |
b183b3f1 PZ |
249 | I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); |
250 | I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); | |
7ff0ebcc RV |
251 | /* enable it... */ |
252 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | |
253 | ||
7733b49b | 254 | if (IS_GEN6(dev_priv)) { |
7ff0ebcc | 255 | I915_WRITE(SNB_DPFC_CTL_SA, |
b183b3f1 PZ |
256 | SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); |
257 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); | |
7ff0ebcc RV |
258 | } |
259 | ||
d5ce4164 | 260 | intel_fbc_recompress(dev_priv); |
7ff0ebcc RV |
261 | } |
262 | ||
0e631adc | 263 | static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 264 | { |
7ff0ebcc RV |
265 | u32 dpfc_ctl; |
266 | ||
7ff0ebcc RV |
267 | /* Disable compression */ |
268 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | |
269 | if (dpfc_ctl & DPFC_CTL_EN) { | |
270 | dpfc_ctl &= ~DPFC_CTL_EN; | |
271 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | |
7ff0ebcc RV |
272 | } |
273 | } | |
274 | ||
0e631adc | 275 | static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) |
7ff0ebcc | 276 | { |
7ff0ebcc RV |
277 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; |
278 | } | |
279 | ||
b183b3f1 | 280 | static void gen7_fbc_activate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 281 | { |
b183b3f1 | 282 | struct intel_fbc_reg_params *params = &dev_priv->fbc.params; |
7ff0ebcc | 283 | u32 dpfc_ctl; |
ce65e47b | 284 | int threshold = dev_priv->fbc.threshold; |
7ff0ebcc | 285 | |
d8514d63 | 286 | dpfc_ctl = 0; |
7733b49b | 287 | if (IS_IVYBRIDGE(dev_priv)) |
b183b3f1 | 288 | dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane); |
d8514d63 | 289 | |
b183b3f1 | 290 | if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) |
ce65e47b | 291 | threshold++; |
7ff0ebcc | 292 | |
ce65e47b | 293 | switch (threshold) { |
7ff0ebcc RV |
294 | case 4: |
295 | case 3: | |
296 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | |
297 | break; | |
298 | case 2: | |
299 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | |
300 | break; | |
301 | case 1: | |
302 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | |
303 | break; | |
304 | } | |
305 | ||
306 | dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; | |
307 | ||
308 | if (dev_priv->fbc.false_color) | |
309 | dpfc_ctl |= FBC_CTL_FALSE_COLOR; | |
310 | ||
7733b49b | 311 | if (IS_IVYBRIDGE(dev_priv)) { |
7ff0ebcc RV |
312 | /* WaFbcAsynchFlipDisableFbcQueue:ivb */ |
313 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |
314 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
315 | ILK_FBCQ_DIS); | |
40f4022e | 316 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
7ff0ebcc | 317 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ |
b183b3f1 PZ |
318 | I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe), |
319 | I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) | | |
7ff0ebcc RV |
320 | HSW_FBCQ_DIS); |
321 | } | |
322 | ||
57012be9 PZ |
323 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
324 | ||
7ff0ebcc | 325 | I915_WRITE(SNB_DPFC_CTL_SA, |
b183b3f1 PZ |
326 | SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); |
327 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); | |
7ff0ebcc | 328 | |
d5ce4164 | 329 | intel_fbc_recompress(dev_priv); |
7ff0ebcc RV |
330 | } |
331 | ||
8c40074c PZ |
332 | static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) |
333 | { | |
334 | if (INTEL_INFO(dev_priv)->gen >= 5) | |
335 | return ilk_fbc_is_active(dev_priv); | |
336 | else if (IS_GM45(dev_priv)) | |
337 | return g4x_fbc_is_active(dev_priv); | |
338 | else | |
339 | return i8xx_fbc_is_active(dev_priv); | |
340 | } | |
341 | ||
342 | static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) | |
343 | { | |
5375ce9f PZ |
344 | struct intel_fbc *fbc = &dev_priv->fbc; |
345 | ||
346 | fbc->active = true; | |
347 | ||
8c40074c PZ |
348 | if (INTEL_INFO(dev_priv)->gen >= 7) |
349 | gen7_fbc_activate(dev_priv); | |
350 | else if (INTEL_INFO(dev_priv)->gen >= 5) | |
351 | ilk_fbc_activate(dev_priv); | |
352 | else if (IS_GM45(dev_priv)) | |
353 | g4x_fbc_activate(dev_priv); | |
354 | else | |
355 | i8xx_fbc_activate(dev_priv); | |
356 | } | |
357 | ||
358 | static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) | |
359 | { | |
5375ce9f PZ |
360 | struct intel_fbc *fbc = &dev_priv->fbc; |
361 | ||
362 | fbc->active = false; | |
363 | ||
8c40074c PZ |
364 | if (INTEL_INFO(dev_priv)->gen >= 5) |
365 | ilk_fbc_deactivate(dev_priv); | |
366 | else if (IS_GM45(dev_priv)) | |
367 | g4x_fbc_deactivate(dev_priv); | |
368 | else | |
369 | i8xx_fbc_deactivate(dev_priv); | |
370 | } | |
371 | ||
94b83957 | 372 | /** |
0e631adc | 373 | * intel_fbc_is_active - Is FBC active? |
7733b49b | 374 | * @dev_priv: i915 device instance |
94b83957 RV |
375 | * |
376 | * This function is used to verify the current state of FBC. | |
377 | * FIXME: This should be tracked in the plane config eventually | |
378 | * instead of queried at runtime for most callers. | |
379 | */ | |
0e631adc | 380 | bool intel_fbc_is_active(struct drm_i915_private *dev_priv) |
7ff0ebcc | 381 | { |
0e631adc | 382 | return dev_priv->fbc.active; |
7ff0ebcc RV |
383 | } |
384 | ||
7ff0ebcc RV |
385 | static void intel_fbc_work_fn(struct work_struct *__work) |
386 | { | |
128d7356 PZ |
387 | struct drm_i915_private *dev_priv = |
388 | container_of(__work, struct drm_i915_private, fbc.work.work); | |
ab34a7e8 PZ |
389 | struct intel_fbc *fbc = &dev_priv->fbc; |
390 | struct intel_fbc_work *work = &fbc->work; | |
391 | struct intel_crtc *crtc = fbc->crtc; | |
ca18d51d PZ |
392 | struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; |
393 | ||
394 | if (drm_crtc_vblank_get(&crtc->base)) { | |
395 | DRM_ERROR("vblank not available for FBC on pipe %c\n", | |
396 | pipe_name(crtc->pipe)); | |
397 | ||
ab34a7e8 | 398 | mutex_lock(&fbc->lock); |
ca18d51d | 399 | work->scheduled = false; |
ab34a7e8 | 400 | mutex_unlock(&fbc->lock); |
ca18d51d PZ |
401 | return; |
402 | } | |
128d7356 PZ |
403 | |
404 | retry: | |
405 | /* Delay the actual enabling to let pageflipping cease and the | |
406 | * display to settle before starting the compression. Note that | |
407 | * this delay also serves a second purpose: it allows for a | |
408 | * vblank to pass after disabling the FBC before we attempt | |
409 | * to modify the control registers. | |
410 | * | |
128d7356 | 411 | * WaFbcWaitForVBlankBeforeEnable:ilk,snb |
ca18d51d PZ |
412 | * |
413 | * It is also worth mentioning that since work->scheduled_vblank can be | |
414 | * updated multiple times by the other threads, hitting the timeout is | |
415 | * not an error condition. We'll just end up hitting the "goto retry" | |
416 | * case below. | |
128d7356 | 417 | */ |
ca18d51d PZ |
418 | wait_event_timeout(vblank->queue, |
419 | drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank, | |
420 | msecs_to_jiffies(50)); | |
7ff0ebcc | 421 | |
ab34a7e8 | 422 | mutex_lock(&fbc->lock); |
7ff0ebcc | 423 | |
128d7356 PZ |
424 | /* Were we cancelled? */ |
425 | if (!work->scheduled) | |
426 | goto out; | |
427 | ||
428 | /* Were we delayed again while this function was sleeping? */ | |
ca18d51d | 429 | if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) { |
ab34a7e8 | 430 | mutex_unlock(&fbc->lock); |
128d7356 | 431 | goto retry; |
7ff0ebcc | 432 | } |
7ff0ebcc | 433 | |
8c40074c | 434 | intel_fbc_hw_activate(dev_priv); |
128d7356 PZ |
435 | |
436 | work->scheduled = false; | |
437 | ||
438 | out: | |
ab34a7e8 | 439 | mutex_unlock(&fbc->lock); |
ca18d51d | 440 | drm_crtc_vblank_put(&crtc->base); |
7ff0ebcc RV |
441 | } |
442 | ||
0e631adc | 443 | static void intel_fbc_schedule_activation(struct intel_crtc *crtc) |
7ff0ebcc | 444 | { |
220285f2 | 445 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
ab34a7e8 PZ |
446 | struct intel_fbc *fbc = &dev_priv->fbc; |
447 | struct intel_fbc_work *work = &fbc->work; | |
7ff0ebcc | 448 | |
ab34a7e8 | 449 | WARN_ON(!mutex_is_locked(&fbc->lock)); |
25ad93fd | 450 | |
ca18d51d PZ |
451 | if (drm_crtc_vblank_get(&crtc->base)) { |
452 | DRM_ERROR("vblank not available for FBC on pipe %c\n", | |
453 | pipe_name(crtc->pipe)); | |
454 | return; | |
455 | } | |
456 | ||
e35be23f PZ |
457 | /* It is useless to call intel_fbc_cancel_work() or cancel_work() in |
458 | * this function since we're not releasing fbc.lock, so it won't have an | |
459 | * opportunity to grab it to discover that it was cancelled. So we just | |
460 | * update the expected jiffy count. */ | |
128d7356 | 461 | work->scheduled = true; |
ca18d51d PZ |
462 | work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base); |
463 | drm_crtc_vblank_put(&crtc->base); | |
7ff0ebcc | 464 | |
128d7356 | 465 | schedule_work(&work->work); |
7ff0ebcc RV |
466 | } |
467 | ||
60eb2cc7 | 468 | static void intel_fbc_deactivate(struct drm_i915_private *dev_priv) |
25ad93fd | 469 | { |
ab34a7e8 PZ |
470 | struct intel_fbc *fbc = &dev_priv->fbc; |
471 | ||
472 | WARN_ON(!mutex_is_locked(&fbc->lock)); | |
25ad93fd | 473 | |
e35be23f PZ |
474 | /* Calling cancel_work() here won't help due to the fact that the work |
475 | * function grabs fbc->lock. Just set scheduled to false so the work | |
476 | * function can know it was cancelled. */ | |
477 | fbc->work.scheduled = false; | |
25ad93fd | 478 | |
ab34a7e8 | 479 | if (fbc->active) |
8c40074c | 480 | intel_fbc_hw_deactivate(dev_priv); |
754d1133 PZ |
481 | } |
482 | ||
010cf73d | 483 | static bool multiple_pipes_ok(struct intel_crtc *crtc) |
232fd934 | 484 | { |
010cf73d PZ |
485 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
486 | struct drm_plane *primary = crtc->base.primary; | |
487 | struct intel_fbc *fbc = &dev_priv->fbc; | |
488 | enum pipe pipe = crtc->pipe; | |
232fd934 | 489 | |
010cf73d PZ |
490 | /* Don't even bother tracking anything we don't need. */ |
491 | if (!no_fbc_on_multiple_pipes(dev_priv)) | |
232fd934 PZ |
492 | return true; |
493 | ||
010cf73d | 494 | WARN_ON(!drm_modeset_is_locked(&primary->mutex)); |
232fd934 | 495 | |
010cf73d PZ |
496 | if (to_intel_plane_state(primary->state)->visible) |
497 | fbc->visible_pipes_mask |= (1 << pipe); | |
498 | else | |
499 | fbc->visible_pipes_mask &= ~(1 << pipe); | |
232fd934 | 500 | |
010cf73d | 501 | return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0; |
232fd934 PZ |
502 | } |
503 | ||
7733b49b | 504 | static int find_compression_threshold(struct drm_i915_private *dev_priv, |
fc786728 PZ |
505 | struct drm_mm_node *node, |
506 | int size, | |
507 | int fb_cpp) | |
508 | { | |
72e96d64 | 509 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
fc786728 PZ |
510 | int compression_threshold = 1; |
511 | int ret; | |
a9da512b PZ |
512 | u64 end; |
513 | ||
514 | /* The FBC hardware for BDW/SKL doesn't have access to the stolen | |
515 | * reserved range size, so it always assumes the maximum (8mb) is used. | |
516 | * If we enable FBC using a CFB on that memory range we'll get FIFO | |
517 | * underruns, even if that range is not reserved by the BIOS. */ | |
ef11bdb3 RV |
518 | if (IS_BROADWELL(dev_priv) || |
519 | IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | |
72e96d64 | 520 | end = ggtt->stolen_size - 8 * 1024 * 1024; |
a9da512b | 521 | else |
72e96d64 | 522 | end = ggtt->stolen_usable_size; |
fc786728 PZ |
523 | |
524 | /* HACK: This code depends on what we will do in *_enable_fbc. If that | |
525 | * code changes, this code needs to change as well. | |
526 | * | |
527 | * The enable_fbc code will attempt to use one of our 2 compression | |
528 | * thresholds, therefore, in that case, we only have 1 resort. | |
529 | */ | |
530 | ||
531 | /* Try to over-allocate to reduce reallocations and fragmentation. */ | |
a9da512b PZ |
532 | ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1, |
533 | 4096, 0, end); | |
fc786728 PZ |
534 | if (ret == 0) |
535 | return compression_threshold; | |
536 | ||
537 | again: | |
538 | /* HW's ability to limit the CFB is 1:4 */ | |
539 | if (compression_threshold > 4 || | |
540 | (fb_cpp == 2 && compression_threshold == 2)) | |
541 | return 0; | |
542 | ||
a9da512b PZ |
543 | ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1, |
544 | 4096, 0, end); | |
7733b49b | 545 | if (ret && INTEL_INFO(dev_priv)->gen <= 4) { |
fc786728 PZ |
546 | return 0; |
547 | } else if (ret) { | |
548 | compression_threshold <<= 1; | |
549 | goto again; | |
550 | } else { | |
551 | return compression_threshold; | |
552 | } | |
553 | } | |
554 | ||
c5ecd469 | 555 | static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) |
fc786728 | 556 | { |
c5ecd469 | 557 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
ab34a7e8 | 558 | struct intel_fbc *fbc = &dev_priv->fbc; |
fc786728 | 559 | struct drm_mm_node *uninitialized_var(compressed_llb); |
c5ecd469 PZ |
560 | int size, fb_cpp, ret; |
561 | ||
ab34a7e8 | 562 | WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb)); |
c5ecd469 | 563 | |
aaf78d27 PZ |
564 | size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache); |
565 | fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0); | |
fc786728 | 566 | |
ab34a7e8 | 567 | ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, |
fc786728 PZ |
568 | size, fb_cpp); |
569 | if (!ret) | |
570 | goto err_llb; | |
571 | else if (ret > 1) { | |
572 | DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); | |
573 | ||
574 | } | |
575 | ||
ab34a7e8 | 576 | fbc->threshold = ret; |
fc786728 PZ |
577 | |
578 | if (INTEL_INFO(dev_priv)->gen >= 5) | |
ab34a7e8 | 579 | I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start); |
7733b49b | 580 | else if (IS_GM45(dev_priv)) { |
ab34a7e8 | 581 | I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start); |
fc786728 PZ |
582 | } else { |
583 | compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); | |
584 | if (!compressed_llb) | |
585 | goto err_fb; | |
586 | ||
587 | ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, | |
588 | 4096, 4096); | |
589 | if (ret) | |
590 | goto err_fb; | |
591 | ||
ab34a7e8 | 592 | fbc->compressed_llb = compressed_llb; |
fc786728 PZ |
593 | |
594 | I915_WRITE(FBC_CFB_BASE, | |
ab34a7e8 | 595 | dev_priv->mm.stolen_base + fbc->compressed_fb.start); |
fc786728 PZ |
596 | I915_WRITE(FBC_LL_BASE, |
597 | dev_priv->mm.stolen_base + compressed_llb->start); | |
598 | } | |
599 | ||
b8bf5d7f | 600 | DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", |
ab34a7e8 | 601 | fbc->compressed_fb.size, fbc->threshold); |
fc786728 PZ |
602 | |
603 | return 0; | |
604 | ||
605 | err_fb: | |
606 | kfree(compressed_llb); | |
ab34a7e8 | 607 | i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); |
fc786728 PZ |
608 | err_llb: |
609 | pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); | |
610 | return -ENOSPC; | |
611 | } | |
612 | ||
7733b49b | 613 | static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) |
fc786728 | 614 | { |
ab34a7e8 PZ |
615 | struct intel_fbc *fbc = &dev_priv->fbc; |
616 | ||
617 | if (drm_mm_node_allocated(&fbc->compressed_fb)) | |
618 | i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); | |
619 | ||
620 | if (fbc->compressed_llb) { | |
621 | i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); | |
622 | kfree(fbc->compressed_llb); | |
fc786728 | 623 | } |
fc786728 PZ |
624 | } |
625 | ||
7733b49b | 626 | void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) |
25ad93fd | 627 | { |
ab34a7e8 PZ |
628 | struct intel_fbc *fbc = &dev_priv->fbc; |
629 | ||
9f218336 | 630 | if (!fbc_supported(dev_priv)) |
0bf73c36 PZ |
631 | return; |
632 | ||
ab34a7e8 | 633 | mutex_lock(&fbc->lock); |
7733b49b | 634 | __intel_fbc_cleanup_cfb(dev_priv); |
ab34a7e8 | 635 | mutex_unlock(&fbc->lock); |
25ad93fd PZ |
636 | } |
637 | ||
adf70c65 PZ |
638 | static bool stride_is_valid(struct drm_i915_private *dev_priv, |
639 | unsigned int stride) | |
640 | { | |
641 | /* These should have been caught earlier. */ | |
642 | WARN_ON(stride < 512); | |
643 | WARN_ON((stride & (64 - 1)) != 0); | |
644 | ||
645 | /* Below are the additional FBC restrictions. */ | |
646 | ||
647 | if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) | |
648 | return stride == 4096 || stride == 8192; | |
649 | ||
650 | if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048) | |
651 | return false; | |
652 | ||
653 | if (stride > 16384) | |
654 | return false; | |
655 | ||
656 | return true; | |
657 | } | |
658 | ||
aaf78d27 PZ |
659 | static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, |
660 | uint32_t pixel_format) | |
b9e831dc | 661 | { |
aaf78d27 | 662 | switch (pixel_format) { |
b9e831dc PZ |
663 | case DRM_FORMAT_XRGB8888: |
664 | case DRM_FORMAT_XBGR8888: | |
665 | return true; | |
666 | case DRM_FORMAT_XRGB1555: | |
667 | case DRM_FORMAT_RGB565: | |
668 | /* 16bpp not supported on gen2 */ | |
aaf78d27 | 669 | if (IS_GEN2(dev_priv)) |
b9e831dc PZ |
670 | return false; |
671 | /* WaFbcOnly1to1Ratio:ctg */ | |
672 | if (IS_G4X(dev_priv)) | |
673 | return false; | |
674 | return true; | |
675 | default: | |
676 | return false; | |
677 | } | |
678 | } | |
679 | ||
856312ae PZ |
680 | /* |
681 | * For some reason, the hardware tracking starts looking at whatever we | |
682 | * programmed as the display plane base address register. It does not look at | |
683 | * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y} | |
684 | * variables instead of just looking at the pipe/plane size. | |
685 | */ | |
686 | static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) | |
3c5f174e PZ |
687 | { |
688 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | |
aaf78d27 | 689 | struct intel_fbc *fbc = &dev_priv->fbc; |
856312ae | 690 | unsigned int effective_w, effective_h, max_w, max_h; |
3c5f174e PZ |
691 | |
692 | if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { | |
693 | max_w = 4096; | |
694 | max_h = 4096; | |
695 | } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { | |
696 | max_w = 4096; | |
697 | max_h = 2048; | |
698 | } else { | |
699 | max_w = 2048; | |
700 | max_h = 1536; | |
701 | } | |
702 | ||
aaf78d27 PZ |
703 | intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, |
704 | &effective_h); | |
856312ae PZ |
705 | effective_w += crtc->adjusted_x; |
706 | effective_h += crtc->adjusted_y; | |
707 | ||
708 | return effective_w <= max_w && effective_h <= max_h; | |
3c5f174e PZ |
709 | } |
710 | ||
aaf78d27 | 711 | static void intel_fbc_update_state_cache(struct intel_crtc *crtc) |
7ff0ebcc | 712 | { |
754d1133 | 713 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
ab34a7e8 | 714 | struct intel_fbc *fbc = &dev_priv->fbc; |
aaf78d27 | 715 | struct intel_fbc_state_cache *cache = &fbc->state_cache; |
1eb52238 PZ |
716 | struct intel_crtc_state *crtc_state = |
717 | to_intel_crtc_state(crtc->base.state); | |
aaf78d27 PZ |
718 | struct intel_plane_state *plane_state = |
719 | to_intel_plane_state(crtc->base.primary->state); | |
720 | struct drm_framebuffer *fb = plane_state->base.fb; | |
7ff0ebcc | 721 | struct drm_i915_gem_object *obj; |
7ff0ebcc | 722 | |
1eb52238 PZ |
723 | WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); |
724 | WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex)); | |
725 | ||
aaf78d27 PZ |
726 | cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; |
727 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | |
728 | cache->crtc.hsw_bdw_pixel_rate = | |
729 | ilk_pipe_pixel_rate(crtc_state); | |
730 | ||
731 | cache->plane.rotation = plane_state->base.rotation; | |
732 | cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16; | |
733 | cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16; | |
734 | cache->plane.visible = plane_state->visible; | |
735 | ||
736 | if (!cache->plane.visible) | |
737 | return; | |
7ff0ebcc | 738 | |
7ff0ebcc | 739 | obj = intel_fb_obj(fb); |
615b40d7 | 740 | |
aaf78d27 PZ |
741 | /* FIXME: We lack the proper locking here, so only run this on the |
742 | * platforms that need. */ | |
8c40074c | 743 | if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) |
aaf78d27 | 744 | cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); |
aaf78d27 PZ |
745 | cache->fb.pixel_format = fb->pixel_format; |
746 | cache->fb.stride = fb->pitches[0]; | |
747 | cache->fb.fence_reg = obj->fence_reg; | |
748 | cache->fb.tiling_mode = obj->tiling_mode; | |
749 | } | |
750 | ||
/*
 * intel_fbc_can_activate - check whether FBC can be activated on @crtc
 *
 * Runs the full set of software checks against the previously captured
 * state cache (fbc->state_cache). On the first failing check it records a
 * human-readable explanation in fbc->no_fbc_reason and returns false; the
 * order of the checks therefore determines which reason gets reported.
 * Returns true only if every check passes.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Nothing to compress if the primary plane isn't shown. */
	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	/* Interlaced and doublescan modes are not compatible with FBC. */
	if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
	    (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (cache->fb.tiling_mode != I915_TILING_X ||
	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}

	/* Pre-gen5 (except G4X) hardware only handles the unrotated case. */
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != BIT(DRM_ROTATE_0)) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw
	 * Workaround: keep FBC off when the pixel rate reaches 95% of the
	 * current CD clock frequency. */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	return true;
}
822 | ||
f51be2e0 | 823 | static bool intel_fbc_can_choose(struct intel_crtc *crtc) |
44a8a257 PZ |
824 | { |
825 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | |
913a3a6a | 826 | struct intel_fbc *fbc = &dev_priv->fbc; |
a98ee793 PZ |
827 | bool enable_by_default = IS_HASWELL(dev_priv) || |
828 | IS_BROADWELL(dev_priv); | |
44a8a257 PZ |
829 | |
830 | if (intel_vgpu_active(dev_priv->dev)) { | |
913a3a6a | 831 | fbc->no_fbc_reason = "VGPU is active"; |
44a8a257 PZ |
832 | return false; |
833 | } | |
834 | ||
a98ee793 | 835 | if (i915.enable_fbc < 0 && !enable_by_default) { |
913a3a6a | 836 | fbc->no_fbc_reason = "disabled per chip default"; |
44a8a257 PZ |
837 | return false; |
838 | } | |
839 | ||
840 | if (!i915.enable_fbc) { | |
913a3a6a | 841 | fbc->no_fbc_reason = "disabled per module param"; |
44a8a257 PZ |
842 | return false; |
843 | } | |
844 | ||
e35be23f | 845 | if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) { |
913a3a6a | 846 | fbc->no_fbc_reason = "no enabled pipes can have FBC"; |
44a8a257 PZ |
847 | return false; |
848 | } | |
849 | ||
e35be23f PZ |
850 | if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) { |
851 | fbc->no_fbc_reason = "no enabled planes can have FBC"; | |
852 | return false; | |
853 | } | |
854 | ||
44a8a257 PZ |
855 | return true; |
856 | } | |
857 | ||
b183b3f1 PZ |
858 | static void intel_fbc_get_reg_params(struct intel_crtc *crtc, |
859 | struct intel_fbc_reg_params *params) | |
860 | { | |
861 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | |
aaf78d27 PZ |
862 | struct intel_fbc *fbc = &dev_priv->fbc; |
863 | struct intel_fbc_state_cache *cache = &fbc->state_cache; | |
b183b3f1 PZ |
864 | |
865 | /* Since all our fields are integer types, use memset here so the | |
866 | * comparison function can rely on memcmp because the padding will be | |
867 | * zero. */ | |
868 | memset(params, 0, sizeof(*params)); | |
869 | ||
870 | params->crtc.pipe = crtc->pipe; | |
871 | params->crtc.plane = crtc->plane; | |
872 | params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); | |
873 | ||
aaf78d27 PZ |
874 | params->fb.pixel_format = cache->fb.pixel_format; |
875 | params->fb.stride = cache->fb.stride; | |
876 | params->fb.fence_reg = cache->fb.fence_reg; | |
b183b3f1 | 877 | |
aaf78d27 | 878 | params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); |
b183b3f1 | 879 | |
aaf78d27 | 880 | params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset; |
b183b3f1 PZ |
881 | } |
882 | ||
883 | static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, | |
884 | struct intel_fbc_reg_params *params2) | |
885 | { | |
886 | /* We can use this since intel_fbc_get_reg_params() does a memset. */ | |
887 | return memcmp(params1, params2, sizeof(*params1)) == 0; | |
888 | } | |
889 | ||
1eb52238 | 890 | void intel_fbc_pre_update(struct intel_crtc *crtc) |
615b40d7 PZ |
891 | { |
892 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | |
ab34a7e8 | 893 | struct intel_fbc *fbc = &dev_priv->fbc; |
615b40d7 | 894 | |
1eb52238 PZ |
895 | if (!fbc_supported(dev_priv)) |
896 | return; | |
897 | ||
898 | mutex_lock(&fbc->lock); | |
615b40d7 | 899 | |
010cf73d | 900 | if (!multiple_pipes_ok(crtc)) { |
913a3a6a | 901 | fbc->no_fbc_reason = "more than one pipe active"; |
212890cf | 902 | goto deactivate; |
7ff0ebcc RV |
903 | } |
904 | ||
ab34a7e8 | 905 | if (!fbc->enabled || fbc->crtc != crtc) |
1eb52238 | 906 | goto unlock; |
615b40d7 | 907 | |
aaf78d27 PZ |
908 | intel_fbc_update_state_cache(crtc); |
909 | ||
212890cf | 910 | deactivate: |
60eb2cc7 | 911 | intel_fbc_deactivate(dev_priv); |
1eb52238 PZ |
912 | unlock: |
913 | mutex_unlock(&fbc->lock); | |
212890cf PZ |
914 | } |
915 | ||
1eb52238 | 916 | static void __intel_fbc_post_update(struct intel_crtc *crtc) |
212890cf PZ |
917 | { |
918 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | |
919 | struct intel_fbc *fbc = &dev_priv->fbc; | |
920 | struct intel_fbc_reg_params old_params; | |
921 | ||
922 | WARN_ON(!mutex_is_locked(&fbc->lock)); | |
923 | ||
924 | if (!fbc->enabled || fbc->crtc != crtc) | |
925 | return; | |
926 | ||
927 | if (!intel_fbc_can_activate(crtc)) { | |
928 | WARN_ON(fbc->active); | |
929 | return; | |
930 | } | |
615b40d7 | 931 | |
ab34a7e8 PZ |
932 | old_params = fbc->params; |
933 | intel_fbc_get_reg_params(crtc, &fbc->params); | |
b183b3f1 | 934 | |
7ff0ebcc RV |
935 | /* If the scanout has not changed, don't modify the FBC settings. |
936 | * Note that we make the fundamental assumption that the fb->obj | |
937 | * cannot be unpinned (and have its GTT offset and fence revoked) | |
938 | * without first being decoupled from the scanout and FBC disabled. | |
939 | */ | |
ab34a7e8 PZ |
940 | if (fbc->active && |
941 | intel_fbc_reg_params_equal(&old_params, &fbc->params)) | |
7ff0ebcc RV |
942 | return; |
943 | ||
60eb2cc7 | 944 | intel_fbc_deactivate(dev_priv); |
0e631adc | 945 | intel_fbc_schedule_activation(crtc); |
212890cf | 946 | fbc->no_fbc_reason = "FBC enabled (active or scheduled)"; |
25ad93fd PZ |
947 | } |
948 | ||
1eb52238 | 949 | void intel_fbc_post_update(struct intel_crtc *crtc) |
25ad93fd | 950 | { |
754d1133 | 951 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
ab34a7e8 | 952 | struct intel_fbc *fbc = &dev_priv->fbc; |
754d1133 | 953 | |
9f218336 | 954 | if (!fbc_supported(dev_priv)) |
0bf73c36 PZ |
955 | return; |
956 | ||
ab34a7e8 | 957 | mutex_lock(&fbc->lock); |
1eb52238 | 958 | __intel_fbc_post_update(crtc); |
ab34a7e8 | 959 | mutex_unlock(&fbc->lock); |
7ff0ebcc RV |
960 | } |
961 | ||
261fe99a PZ |
962 | static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) |
963 | { | |
964 | if (fbc->enabled) | |
965 | return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; | |
966 | else | |
967 | return fbc->possible_framebuffer_bits; | |
968 | } | |
969 | ||
dbef0f15 PZ |
/*
 * intel_fbc_invalidate - notify FBC of CPU frontbuffer writes
 * @frontbuffer_bits: planes being written to
 * @origin: origin of the dirtying operation
 *
 * Accumulates the relevant bits in fbc->busy_bits and deactivates FBC
 * while any of its tracked planes are busy. GTT and flip origins are
 * ignored (the hardware/flip paths handle those cases).
 */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	/* Only remember the bits FBC actually cares about. */
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv);

	mutex_unlock(&fbc->lock);
}
991 | ||
/*
 * intel_fbc_flush - notify FBC that frontbuffer writes have completed
 * @frontbuffer_bits: planes that were flushed
 * @origin: origin of the flush
 *
 * Clears the flushed bits from fbc->busy_bits. Once no tracked plane is
 * busy anymore, either triggers a hardware recompression (if FBC is still
 * active) or re-runs the post-update path to reactivate it.
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* Same origins ignored as in intel_fbc_invalidate(). */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else
			__intel_fbc_post_update(fbc->crtc);
	}

	mutex_unlock(&fbc->lock);
}
1017 | ||
f51be2e0 PZ |
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct drm_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool fbc_crtc_present = false;
	int i, j;

	mutex_lock(&fbc->lock);

	/* Is the CRTC currently tied to FBC part of this commit? */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (fbc->crtc == to_intel_crtc(crtc)) {
			fbc_crtc_present = true;
			break;
		}
	}
	/* This atomic commit doesn't involve the CRTC currently tied to FBC. */
	if (!fbc_crtc_present && fbc->crtc != NULL)
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane_state);

		if (!intel_plane_state->visible)
			continue;

		/* Find this plane's CRTC within the same commit. */
		for_each_crtc_in_state(state, crtc, crtc_state, j) {
			struct intel_crtc_state *intel_crtc_state =
				to_intel_crtc_state(crtc_state);

			if (plane_state->crtc != crtc)
				continue;

			if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
				break;

			/* First eligible CRTC wins. */
			intel_crtc_state->enable_fbc = true;
			goto out;
		}
	}

out:
	mutex_unlock(&fbc->lock);
}
1082 | ||
d029bcad PZ |
1083 | /** |
1084 | * intel_fbc_enable: tries to enable FBC on the CRTC | |
1085 | * @crtc: the CRTC | |
1086 | * | |
f51be2e0 | 1087 | * This function checks if the given CRTC was chosen for FBC, then enables it if |
49227c4a PZ |
1088 | * possible. Notice that it doesn't activate FBC. It is valid to call |
1089 | * intel_fbc_enable multiple times for the same pipe without an | |
1090 | * intel_fbc_disable in the middle, as long as it is deactivated. | |
d029bcad PZ |
1091 | */ |
1092 | void intel_fbc_enable(struct intel_crtc *crtc) | |
1093 | { | |
1094 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | |
ab34a7e8 | 1095 | struct intel_fbc *fbc = &dev_priv->fbc; |
d029bcad PZ |
1096 | |
1097 | if (!fbc_supported(dev_priv)) | |
1098 | return; | |
1099 | ||
ab34a7e8 | 1100 | mutex_lock(&fbc->lock); |
d029bcad | 1101 | |
ab34a7e8 | 1102 | if (fbc->enabled) { |
49227c4a PZ |
1103 | WARN_ON(fbc->crtc == NULL); |
1104 | if (fbc->crtc == crtc) { | |
1105 | WARN_ON(!crtc->config->enable_fbc); | |
1106 | WARN_ON(fbc->active); | |
1107 | } | |
d029bcad PZ |
1108 | goto out; |
1109 | } | |
1110 | ||
f51be2e0 PZ |
1111 | if (!crtc->config->enable_fbc) |
1112 | goto out; | |
1113 | ||
ab34a7e8 PZ |
1114 | WARN_ON(fbc->active); |
1115 | WARN_ON(fbc->crtc != NULL); | |
d029bcad | 1116 | |
aaf78d27 | 1117 | intel_fbc_update_state_cache(crtc); |
c5ecd469 | 1118 | if (intel_fbc_alloc_cfb(crtc)) { |
913a3a6a | 1119 | fbc->no_fbc_reason = "not enough stolen memory"; |
c5ecd469 PZ |
1120 | goto out; |
1121 | } | |
1122 | ||
d029bcad | 1123 | DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); |
ab34a7e8 | 1124 | fbc->no_fbc_reason = "FBC enabled but not active yet\n"; |
d029bcad | 1125 | |
ab34a7e8 PZ |
1126 | fbc->enabled = true; |
1127 | fbc->crtc = crtc; | |
d029bcad | 1128 | out: |
ab34a7e8 | 1129 | mutex_unlock(&fbc->lock); |
d029bcad PZ |
1130 | } |
1131 | ||
/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	/* Preconditions: lock held, enabled but deactivated, CRTC off. */
	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);
	WARN_ON(crtc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	/* Release the compressed framebuffer (stolen memory). */
	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}
1156 | ||
/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc) {
		WARN_ON(!fbc->enabled);
		WARN_ON(fbc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);

	/* NOTE(review): cancelled outside fbc->lock — presumably the work
	 * function takes the lock itself, so cancelling under it could
	 * deadlock; confirm against intel_fbc_work_fn(). */
	cancel_work_sync(&fbc->work.work);
}
1181 | ||
1182 | /** | |
c937ab3e | 1183 | * intel_fbc_global_disable - globally disable FBC |
d029bcad PZ |
1184 | * @dev_priv: i915 device instance |
1185 | * | |
1186 | * This function disables FBC regardless of which CRTC is associated with it. | |
1187 | */ | |
c937ab3e | 1188 | void intel_fbc_global_disable(struct drm_i915_private *dev_priv) |
d029bcad | 1189 | { |
ab34a7e8 PZ |
1190 | struct intel_fbc *fbc = &dev_priv->fbc; |
1191 | ||
d029bcad PZ |
1192 | if (!fbc_supported(dev_priv)) |
1193 | return; | |
1194 | ||
ab34a7e8 PZ |
1195 | mutex_lock(&fbc->lock); |
1196 | if (fbc->enabled) | |
d029bcad | 1197 | __intel_fbc_disable(dev_priv); |
ab34a7e8 | 1198 | mutex_unlock(&fbc->lock); |
65c7600f PZ |
1199 | |
1200 | cancel_work_sync(&fbc->work.work); | |
d029bcad PZ |
1201 | } |
1202 | ||
010cf73d PZ |
1203 | /** |
1204 | * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking | |
1205 | * @dev_priv: i915 device instance | |
1206 | * | |
1207 | * The FBC code needs to track CRTC visibility since the older platforms can't | |
1208 | * have FBC enabled while multiple pipes are used. This function does the | |
1209 | * initial setup at driver load to make sure FBC is matching the real hardware. | |
1210 | */ | |
1211 | void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) | |
1212 | { | |
1213 | struct intel_crtc *crtc; | |
1214 | ||
1215 | /* Don't even bother tracking anything if we don't need. */ | |
1216 | if (!no_fbc_on_multiple_pipes(dev_priv)) | |
1217 | return; | |
1218 | ||
1219 | for_each_intel_crtc(dev_priv->dev, crtc) | |
1220 | if (intel_crtc_active(&crtc->base) && | |
1221 | to_intel_plane_state(crtc->base.primary->state)->visible) | |
1222 | dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); | |
1223 | } | |
1224 | ||
94b83957 RV |
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe;

	/* Software state: start disabled/inactive with no work pending. */
	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;
	fbc->work.scheduled = false;

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* Collect the frontbuffer bits of every primary plane that could
	 * host FBC; on pipe-A-only platforms, only pipe A's. */
	for_each_pipe(dev_priv, pipe) {
		fbc->possible_framebuffer_bits |=
				INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}