drm/i915/fbc: move intel_fbc_{enable, disable} call one level up
drivers/gpu/drm/i915/intel_fbc.c

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns: they come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the
 * display.
 *
 * i915 is responsible for reserving stolen memory for FBC and programming its
 * offset in the proper registers. The hardware takes care of all the
 * compression and decompression. However, there are many known cases where we
 * have to forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

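/* A platform supports FBC iff intel_fbc_init() installed an activate vfunc. */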
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.activate != NULL;
}

static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}

static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen < 4;
}

static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen <= 3;
}

/*
 * On some platforms, the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, so we lie to the hardware about the
 * plane's origin so that the x and y offsets can actually fit the registers.
 * As a consequence, the fence doesn't really start exactly at the display
 * plane address we program because it starts at the real start of the buffer,
 * so we have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
	return crtc->base.y - crtc->adjusted_y;
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value
 * we write to the PLANE_SIZE register. For BDW-, the hardware looks at the
 * value we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	int w, h;

	if (intel_rotation_90_or_270(cache->plane.rotation)) {
		w = cache->plane.src_h;
		h = cache->plane.src_w;
	} else {
		w = cache->plane.src_w;
		h = cache->plane.src_h;
	}

	if (width)
		*width = w;
	if (height)
		*height = h;
}

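/*
 * The CFB must cover the full framebuffer stride for every visible line; the
 * line count is capped at 2048 on gen7+.
 */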
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (INTEL_INFO(dev_priv)->gen >= 7)
		lines = min(lines, 2048);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	dev_priv->fbc.active = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

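/*
 * Legacy FBC activation (everything below ILK except GM45, per
 * intel_fbc_init()): program the CFB pitch in 32B/64B units, clear the old
 * tags, set up the fence on gen4, then enable periodic compression with the
 * parameters snapshotted in fbc.params.
 */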
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.active = true;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->fb.fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dev_priv->fbc.active = true;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	dev_priv->fbc.active = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

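/*
 * ILK/SNB activation: program the fence offset, the render tracking address
 * and the compression limit, enable compression and force an initial
 * recompression.
 */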
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.active = true;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= params->fb.fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	}

	intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	dev_priv->fbc.active = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.active = true;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

	intel_fbc_recompress(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

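/*
 * Deferred activation worker: scheduled by intel_fbc_schedule_activation(),
 * it waits for a vblank to pass and then calls the platform activate vfunc,
 * unless the work was cancelled or rescheduled in the meantime.
 */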
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(__work, struct drm_i915_private, fbc.work.work);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;
	struct intel_crtc *crtc = fbc->crtc;
	struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));

		mutex_lock(&fbc->lock);
		work->scheduled = false;
		mutex_unlock(&fbc->lock);
		return;
	}

retry:
	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 *
	 * It is also worth mentioning that since work->scheduled_vblank can be
	 * updated multiple times by the other threads, hitting the timeout is
	 * not an error condition. We'll just end up hitting the "goto retry"
	 * case below.
	 */
	wait_event_timeout(vblank->queue,
		drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
		msecs_to_jiffies(50));

	mutex_lock(&fbc->lock);

	/* Were we cancelled? */
	if (!work->scheduled)
		goto out;

	/* Were we delayed again while this function was sleeping? */
	if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
		mutex_unlock(&fbc->lock);
		goto retry;
	}

	if (crtc->base.primary->fb == work->fb)
		fbc->activate(dev_priv);

	work->scheduled = false;

out:
	mutex_unlock(&fbc->lock);
	drm_crtc_vblank_put(&crtc->base);
}

static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	fbc->work.scheduled = false;
}

static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));
		return;
	}

	/* It is useless to call intel_fbc_cancel_work() in this function since
	 * we're not releasing fbc.lock, so it won't have an opportunity to grab
	 * it to discover that it was cancelled. So we just update the expected
	 * jiffy count. */
	work->fb = crtc->base.primary->fb;
	work->scheduled = true;
	work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
	drm_crtc_vblank_put(&crtc->base);

	schedule_work(&work->work);
}

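/* Cancel any pending activation work and turn compression off if it is
 * currently active. Callers must hold fbc->lock. */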
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	intel_fbc_cancel_work(dev_priv);

	if (fbc->active)
		fbc->deactivate(dev_priv);
}

static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (fbc->no_fbc_reason == reason)
		return;

	fbc->no_fbc_reason = reason;
	DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}

static bool crtc_can_fbc(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
		return false;

	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
		return false;

	return true;
}

static bool multiple_pipes_ok(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_plane *primary = crtc->base.primary;
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe = crtc->pipe;

	/* Don't even bother tracking anything we don't need. */
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return true;

	WARN_ON(!drm_modeset_is_locked(&primary->mutex));

	if (to_intel_plane_state(primary->state)->visible)
		fbc->visible_pipes_mask |= (1 << pipe);
	else
		fbc->visible_pipes_mask &= ~(1 << pipe);

	return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
}

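/*
 * Find a stolen-memory node for the CFB: start by over-allocating, then keep
 * halving the request and doubling the compression threshold (up to the
 * hardware's 1:4 limit) until the allocation succeeds. Returns the resulting
 * threshold, or 0 on failure.
 */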
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
	else
		end = dev_priv->gtt.stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

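/*
 * Reserve the CFB (and, on the oldest platforms, the line-length buffer) in
 * stolen memory and program the hardware with its base address.
 */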
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	fbc->threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
	}
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* These should have been caught earlier. */
	WARN_ON(stride < 512);
	WARN_ON((stride & (64 - 1)) != 0);

	/* Below are the additional FBC restrictions. */

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		return stride == 4096 || stride == 8192;

	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN2(dev_priv))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += crtc->adjusted_x;
	effective_h += crtc->adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

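/*
 * Snapshot the CRTC, plane and framebuffer state that FBC cares about into
 * fbc->state_cache. Both the CRTC and the primary plane must be locked.
 */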
static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(crtc->base.primary->state);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj;

	WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);

	cache->plane.rotation = plane_state->base.rotation;
	cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
	cache->plane.visible = plane_state->visible;

	if (!cache->plane.visible)
		return;

	obj = intel_fb_obj(fb);

	/* FIXME: We lack the proper locking here, so only run this on the
	 * platforms that need it. */
	if (dev_priv->fbc.activate == ilk_fbc_activate)
		cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
	cache->fb.id = fb->base.id;
	cache->fb.pixel_format = fb->pixel_format;
	cache->fb.stride = fb->pitches[0];
	cache->fb.fence_reg = obj->fence_reg;
	cache->fb.tiling_mode = obj->tiling_mode;
}

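/*
 * Check the cached state against every condition required for the hardware to
 * be activated, recording the first failure in fbc->no_fbc_reason.
 */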
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!cache->plane.visible) {
		set_no_fbc_reason(dev_priv, "primary plane not visible");
		return false;
	}

	if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
	    (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
		set_no_fbc_reason(dev_priv, "incompatible mode");
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		set_no_fbc_reason(dev_priv, "mode too large for compression");
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (cache->fb.tiling_mode != I915_TILING_X ||
	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
		set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
		return false;
	}
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != BIT(DRM_ROTATE_0)) {
		set_no_fbc_reason(dev_priv, "rotation unsupported");
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
		set_no_fbc_reason(dev_priv, "pixel format is invalid");
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
		set_no_fbc_reason(dev_priv, "pixel rate is too big");
		return false;
	}

	/* It is possible for the required CFB size to change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		set_no_fbc_reason(dev_priv, "CFB requirements changed");
		return false;
	}

	return true;
}

static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (intel_vgpu_active(dev_priv->dev)) {
		set_no_fbc_reason(dev_priv, "VGPU is active");
		return false;
	}

	if (i915.enable_fbc < 0) {
		set_no_fbc_reason(dev_priv, "disabled per chip default");
		return false;
	}

	if (!i915.enable_fbc) {
		set_no_fbc_reason(dev_priv, "disabled per module param");
		return false;
	}

	if (!crtc_can_fbc(crtc)) {
		set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
		return false;
	}

	return true;
}

static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->crtc.pipe = crtc->pipe;
	params->crtc.plane = crtc->plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);

	params->fb.id = cache->fb.id;
	params->fb.pixel_format = cache->fb.pixel_format;
	params->fb.stride = cache->fb.stride;
	params->fb.fence_reg = cache->fb.fence_reg;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}

static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
				       struct intel_fbc_reg_params *params2)
{
	/* We can use this since intel_fbc_get_reg_params() does a memset. */
	return memcmp(params1, params2, sizeof(*params1)) == 0;
}

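/*
 * intel_fbc_pre_update - prepare FBC for an update on @crtc
 *
 * Called before committing a plane update: if @crtc is the CRTC that owns FBC,
 * refresh the state cache and deactivate the hardware so the update happens
 * with compression off. Also deactivates when more than one pipe becomes
 * active on platforms that cannot compress in that case.
 */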
void intel_fbc_pre_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (!multiple_pipes_ok(crtc)) {
		set_no_fbc_reason(dev_priv, "more than one pipe active");
		goto deactivate;
	}

	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc);

deactivate:
	intel_fbc_deactivate(dev_priv);
unlock:
	mutex_unlock(&fbc->lock);
}

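/*
 * Called with fbc->lock held after an update on the FBC CRTC: re-runs the
 * activation checks and, if the register parameters changed, schedules a
 * deferred (re)activation.
 */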
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_reg_params old_params;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	if (!intel_fbc_can_activate(crtc)) {
		WARN_ON(fbc->active);
		return;
	}

	old_params = fbc->params;
	intel_fbc_get_reg_params(crtc, &fbc->params);

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (fbc->active &&
	    intel_fbc_reg_params_equal(&old_params, &fbc->params))
		return;

	intel_fbc_deactivate(dev_priv);
	intel_fbc_schedule_activation(crtc);
	fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}

void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}

static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->enabled)
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

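/*
 * Frontbuffer tracking: invalidate marks the FBC frontbuffer bits busy and
 * deactivates compression while writes are pending; flush (below) clears the
 * busy bits and either forces a recompression or re-runs the post-update path.
 */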
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->busy_bits)
		intel_fbc_deactivate(dev_priv);

	mutex_unlock(&fbc->lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else
			__intel_fbc_post_update(fbc->crtc);
	}

	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct drm_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool fbc_crtc_present = false;
	int i, j;

	mutex_lock(&fbc->lock);

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (fbc->crtc == to_intel_crtc(crtc)) {
			fbc_crtc_present = true;
			break;
		}
	}
	/* This atomic commit doesn't involve the CRTC currently tied to FBC. */
	if (!fbc_crtc_present && fbc->crtc != NULL)
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie
	 * FBC to pipe or plane A. */
	for_each_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane_state);

		if (!intel_plane_state->visible)
			continue;

		for_each_crtc_in_state(state, crtc, crtc_state, j) {
			struct intel_crtc_state *intel_crtc_state =
				to_intel_crtc_state(crtc_state);

			if (plane_state->crtc != crtc)
				continue;

			if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
				break;

			intel_crtc_state->enable_fbc = true;
			goto out;
		}
	}

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it if
 * possible. Notice that it doesn't activate FBC.
 */
void intel_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (fbc->enabled) {
		WARN_ON(fbc->crtc == crtc);
		goto out;
	}

	if (!crtc->config->enable_fbc)
		goto out;

	WARN_ON(fbc->active);
	WARN_ON(fbc->crtc != NULL);

	intel_fbc_update_state_cache(crtc);
	if (intel_fbc_alloc_cfb(crtc)) {
		set_no_fbc_reason(dev_priv, "not enough stolen memory");
		goto out;
	}

	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";

	fbc->enabled = true;
	fbc->crtc = crtc;
out:
	mutex_unlock(&fbc->lock);
}

/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);
	WARN_ON(crtc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc) {
		WARN_ON(!fbc->enabled);
		WARN_ON(fbc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);

	cancel_work_sync(&fbc->work.work);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->enabled)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);

	cancel_work_sync(&fbc->work.work);
}

/**
 * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
 * @dev_priv: i915 device instance
 *
 * The FBC code needs to track CRTC visibility since the older platforms can't
 * have FBC enabled while multiple pipes are used. This function does the
 * initial setup at driver load to make sure FBC is matching the real hardware.
 */
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Don't even bother tracking anything if we don't need. */
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return;

	for_each_intel_crtc(dev_priv->dev, crtc)
		if (intel_crtc_active(&crtc->base) &&
		    to_intel_plane_state(crtc->base.primary->state)->visible)
			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe;

	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;
	fbc->work.scheduled = false;

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		fbc->possible_framebuffer_bits |=
				INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
	}

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		fbc->is_active = ilk_fbc_is_active;
		fbc->activate = gen7_fbc_activate;
		fbc->deactivate = ilk_fbc_deactivate;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		fbc->is_active = ilk_fbc_is_active;
		fbc->activate = ilk_fbc_activate;
		fbc->deactivate = ilk_fbc_deactivate;
	} else if (IS_GM45(dev_priv)) {
		fbc->is_active = g4x_fbc_is_active;
		fbc->activate = g4x_fbc_activate;
		fbc->deactivate = g4x_fbc_deactivate;
	} else {
		fbc->is_active = i8xx_fbc_is_active;
		fbc->activate = i8xx_fbc_activate;
		fbc->deactivate = i8xx_fbc_deactivate;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (fbc->is_active(dev_priv))
		fbc->deactivate(dev_priv);
}