2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * DOC: Frame Buffer Compression (FBC)
27 * FBC tries to save memory bandwidth (and so power consumption) by
28 * compressing the amount of memory used by the display. It is totally
29 * transparent to user space and completely handled in the kernel.
31 * The benefits of FBC are mostly visible with solid backgrounds and
32 * variation-less patterns. It comes from keeping the memory footprint small
33 * and having fewer memory pages opened and accessed for refreshing the display.
35 * i915 is responsible for reserving stolen memory for FBC and configuring its
36 * offset in the proper registers. The hardware takes care of all
37 * compress/decompress. However there are many known cases where we have to
38 * forcibly disable it to allow proper screen updates.
41 #include "intel_drv.h"
44 static inline bool fbc_supported(struct drm_i915_private
*dev_priv
)
46 return dev_priv
->fbc
.enable_fbc
!= NULL
;
49 static inline bool fbc_on_pipe_a_only(struct drm_i915_private
*dev_priv
)
51 return IS_HASWELL(dev_priv
) || INTEL_INFO(dev_priv
)->gen
>= 8;
55 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
56 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
57 * origin so the x and y offsets can actually fit the registers. As a
58 * consequence, the fence doesn't really start exactly at the display plane
59 * address we program because it starts at the real start of the buffer, so we
60 * have to take this into consideration here.
62 static unsigned int get_crtc_fence_y_offset(struct intel_crtc
*crtc
)
64 return crtc
->base
.y
- crtc
->adjusted_y
;
67 static void i8xx_fbc_disable(struct drm_i915_private
*dev_priv
)
71 dev_priv
->fbc
.enabled
= false;
73 /* Disable compression */
74 fbc_ctl
= I915_READ(FBC_CONTROL
);
75 if ((fbc_ctl
& FBC_CTL_EN
) == 0)
78 fbc_ctl
&= ~FBC_CTL_EN
;
79 I915_WRITE(FBC_CONTROL
, fbc_ctl
);
81 /* Wait for compressing bit to clear */
82 if (wait_for((I915_READ(FBC_STATUS
) & FBC_STAT_COMPRESSING
) == 0, 10)) {
83 DRM_DEBUG_KMS("FBC idle timed out\n");
87 DRM_DEBUG_KMS("disabled FBC\n");
90 static void i8xx_fbc_enable(struct intel_crtc
*crtc
)
92 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
93 struct drm_framebuffer
*fb
= crtc
->base
.primary
->fb
;
94 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
99 dev_priv
->fbc
.enabled
= true;
101 /* Note: fbc.threshold == 1 for i8xx */
102 cfb_pitch
= dev_priv
->fbc
.uncompressed_size
/ FBC_LL_SIZE
;
103 if (fb
->pitches
[0] < cfb_pitch
)
104 cfb_pitch
= fb
->pitches
[0];
106 /* FBC_CTL wants 32B or 64B units */
107 if (IS_GEN2(dev_priv
))
108 cfb_pitch
= (cfb_pitch
/ 32) - 1;
110 cfb_pitch
= (cfb_pitch
/ 64) - 1;
113 for (i
= 0; i
< (FBC_LL_SIZE
/ 32) + 1; i
++)
114 I915_WRITE(FBC_TAG(i
), 0);
116 if (IS_GEN4(dev_priv
)) {
120 fbc_ctl2
= FBC_CTL_FENCE_DBL
| FBC_CTL_IDLE_IMM
| FBC_CTL_CPU_FENCE
;
121 fbc_ctl2
|= FBC_CTL_PLANE(crtc
->plane
);
122 I915_WRITE(FBC_CONTROL2
, fbc_ctl2
);
123 I915_WRITE(FBC_FENCE_OFF
, get_crtc_fence_y_offset(crtc
));
127 fbc_ctl
= I915_READ(FBC_CONTROL
);
128 fbc_ctl
&= 0x3fff << FBC_CTL_INTERVAL_SHIFT
;
129 fbc_ctl
|= FBC_CTL_EN
| FBC_CTL_PERIODIC
;
130 if (IS_I945GM(dev_priv
))
131 fbc_ctl
|= FBC_CTL_C3_IDLE
; /* 945 needs special SR handling */
132 fbc_ctl
|= (cfb_pitch
& 0xff) << FBC_CTL_STRIDE_SHIFT
;
133 fbc_ctl
|= obj
->fence_reg
;
134 I915_WRITE(FBC_CONTROL
, fbc_ctl
);
136 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
137 cfb_pitch
, crtc
->base
.y
, plane_name(crtc
->plane
));
140 static bool i8xx_fbc_enabled(struct drm_i915_private
*dev_priv
)
142 return I915_READ(FBC_CONTROL
) & FBC_CTL_EN
;
145 static void g4x_fbc_enable(struct intel_crtc
*crtc
)
147 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
148 struct drm_framebuffer
*fb
= crtc
->base
.primary
->fb
;
149 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
152 dev_priv
->fbc
.enabled
= true;
154 dpfc_ctl
= DPFC_CTL_PLANE(crtc
->plane
) | DPFC_SR_EN
;
155 if (drm_format_plane_cpp(fb
->pixel_format
, 0) == 2)
156 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
158 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
159 dpfc_ctl
|= DPFC_CTL_FENCE_EN
| obj
->fence_reg
;
161 I915_WRITE(DPFC_FENCE_YOFF
, get_crtc_fence_y_offset(crtc
));
164 I915_WRITE(DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
166 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc
->plane
));
169 static void g4x_fbc_disable(struct drm_i915_private
*dev_priv
)
173 dev_priv
->fbc
.enabled
= false;
175 /* Disable compression */
176 dpfc_ctl
= I915_READ(DPFC_CONTROL
);
177 if (dpfc_ctl
& DPFC_CTL_EN
) {
178 dpfc_ctl
&= ~DPFC_CTL_EN
;
179 I915_WRITE(DPFC_CONTROL
, dpfc_ctl
);
181 DRM_DEBUG_KMS("disabled FBC\n");
185 static bool g4x_fbc_enabled(struct drm_i915_private
*dev_priv
)
187 return I915_READ(DPFC_CONTROL
) & DPFC_CTL_EN
;
190 /* This function forces a CFB recompression through the nuke operation. */
191 static void intel_fbc_recompress(struct drm_i915_private
*dev_priv
)
193 I915_WRITE(MSG_FBC_REND_STATE
, FBC_REND_NUKE
);
194 POSTING_READ(MSG_FBC_REND_STATE
);
197 static void ilk_fbc_enable(struct intel_crtc
*crtc
)
199 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
200 struct drm_framebuffer
*fb
= crtc
->base
.primary
->fb
;
201 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
203 int threshold
= dev_priv
->fbc
.threshold
;
204 unsigned int y_offset
;
206 dev_priv
->fbc
.enabled
= true;
208 dpfc_ctl
= DPFC_CTL_PLANE(crtc
->plane
);
209 if (drm_format_plane_cpp(fb
->pixel_format
, 0) == 2)
215 dpfc_ctl
|= DPFC_CTL_LIMIT_4X
;
218 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
221 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
224 dpfc_ctl
|= DPFC_CTL_FENCE_EN
;
225 if (IS_GEN5(dev_priv
))
226 dpfc_ctl
|= obj
->fence_reg
;
228 y_offset
= get_crtc_fence_y_offset(crtc
);
229 I915_WRITE(ILK_DPFC_FENCE_YOFF
, y_offset
);
230 I915_WRITE(ILK_FBC_RT_BASE
, i915_gem_obj_ggtt_offset(obj
) | ILK_FBC_RT_VALID
);
232 I915_WRITE(ILK_DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
234 if (IS_GEN6(dev_priv
)) {
235 I915_WRITE(SNB_DPFC_CTL_SA
,
236 SNB_CPU_FENCE_ENABLE
| obj
->fence_reg
);
237 I915_WRITE(DPFC_CPU_FENCE_OFFSET
, y_offset
);
240 intel_fbc_recompress(dev_priv
);
242 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc
->plane
));
245 static void ilk_fbc_disable(struct drm_i915_private
*dev_priv
)
249 dev_priv
->fbc
.enabled
= false;
251 /* Disable compression */
252 dpfc_ctl
= I915_READ(ILK_DPFC_CONTROL
);
253 if (dpfc_ctl
& DPFC_CTL_EN
) {
254 dpfc_ctl
&= ~DPFC_CTL_EN
;
255 I915_WRITE(ILK_DPFC_CONTROL
, dpfc_ctl
);
257 DRM_DEBUG_KMS("disabled FBC\n");
261 static bool ilk_fbc_enabled(struct drm_i915_private
*dev_priv
)
263 return I915_READ(ILK_DPFC_CONTROL
) & DPFC_CTL_EN
;
266 static void gen7_fbc_enable(struct intel_crtc
*crtc
)
268 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
269 struct drm_framebuffer
*fb
= crtc
->base
.primary
->fb
;
270 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
272 int threshold
= dev_priv
->fbc
.threshold
;
274 dev_priv
->fbc
.enabled
= true;
277 if (IS_IVYBRIDGE(dev_priv
))
278 dpfc_ctl
|= IVB_DPFC_CTL_PLANE(crtc
->plane
);
280 if (drm_format_plane_cpp(fb
->pixel_format
, 0) == 2)
286 dpfc_ctl
|= DPFC_CTL_LIMIT_4X
;
289 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
292 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
296 dpfc_ctl
|= IVB_DPFC_CTL_FENCE_EN
;
298 if (dev_priv
->fbc
.false_color
)
299 dpfc_ctl
|= FBC_CTL_FALSE_COLOR
;
301 if (IS_IVYBRIDGE(dev_priv
)) {
302 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
303 I915_WRITE(ILK_DISPLAY_CHICKEN1
,
304 I915_READ(ILK_DISPLAY_CHICKEN1
) |
306 } else if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) {
307 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
308 I915_WRITE(CHICKEN_PIPESL_1(crtc
->pipe
),
309 I915_READ(CHICKEN_PIPESL_1(crtc
->pipe
)) |
313 I915_WRITE(ILK_DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
315 I915_WRITE(SNB_DPFC_CTL_SA
,
316 SNB_CPU_FENCE_ENABLE
| obj
->fence_reg
);
317 I915_WRITE(DPFC_CPU_FENCE_OFFSET
, get_crtc_fence_y_offset(crtc
));
319 intel_fbc_recompress(dev_priv
);
321 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc
->plane
));
325 * intel_fbc_enabled - Is FBC enabled?
326 * @dev_priv: i915 device instance
328 * This function is used to verify the current state of FBC.
329 * FIXME: This should be tracked in the plane config eventually
330 * instead of queried at runtime for most callers.
332 bool intel_fbc_enabled(struct drm_i915_private
*dev_priv
)
334 return dev_priv
->fbc
.enabled
;
337 static void intel_fbc_enable(struct intel_crtc
*crtc
,
338 const struct drm_framebuffer
*fb
)
340 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
342 dev_priv
->fbc
.enable_fbc(crtc
);
344 dev_priv
->fbc
.crtc
= crtc
;
345 dev_priv
->fbc
.fb_id
= fb
->base
.id
;
346 dev_priv
->fbc
.y
= crtc
->base
.y
;
349 static void intel_fbc_work_fn(struct work_struct
*__work
)
351 struct intel_fbc_work
*work
=
352 container_of(to_delayed_work(__work
),
353 struct intel_fbc_work
, work
);
354 struct drm_i915_private
*dev_priv
= work
->crtc
->base
.dev
->dev_private
;
355 struct drm_framebuffer
*crtc_fb
= work
->crtc
->base
.primary
->fb
;
357 mutex_lock(&dev_priv
->fbc
.lock
);
358 if (work
== dev_priv
->fbc
.fbc_work
) {
359 /* Double check that we haven't switched fb without cancelling
362 if (crtc_fb
== work
->fb
)
363 intel_fbc_enable(work
->crtc
, work
->fb
);
365 dev_priv
->fbc
.fbc_work
= NULL
;
367 mutex_unlock(&dev_priv
->fbc
.lock
);
372 static void intel_fbc_cancel_work(struct drm_i915_private
*dev_priv
)
374 WARN_ON(!mutex_is_locked(&dev_priv
->fbc
.lock
));
376 if (dev_priv
->fbc
.fbc_work
== NULL
)
379 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
381 /* Synchronisation is provided by struct_mutex and checking of
382 * dev_priv->fbc.fbc_work, so we can perform the cancellation
383 * entirely asynchronously.
385 if (cancel_delayed_work(&dev_priv
->fbc
.fbc_work
->work
))
386 /* tasklet was killed before being run, clean up */
387 kfree(dev_priv
->fbc
.fbc_work
);
389 /* Mark the work as no longer wanted so that if it does
390 * wake-up (because the work was already running and waiting
391 * for our mutex), it will discover that is no longer
394 dev_priv
->fbc
.fbc_work
= NULL
;
397 static void intel_fbc_schedule_enable(struct intel_crtc
*crtc
)
399 struct intel_fbc_work
*work
;
400 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
402 WARN_ON(!mutex_is_locked(&dev_priv
->fbc
.lock
));
404 intel_fbc_cancel_work(dev_priv
);
406 work
= kzalloc(sizeof(*work
), GFP_KERNEL
);
408 DRM_ERROR("Failed to allocate FBC work structure\n");
409 intel_fbc_enable(crtc
, crtc
->base
.primary
->fb
);
414 work
->fb
= crtc
->base
.primary
->fb
;
415 INIT_DELAYED_WORK(&work
->work
, intel_fbc_work_fn
);
417 dev_priv
->fbc
.fbc_work
= work
;
419 /* Delay the actual enabling to let pageflipping cease and the
420 * display to settle before starting the compression. Note that
421 * this delay also serves a second purpose: it allows for a
422 * vblank to pass after disabling the FBC before we attempt
423 * to modify the control registers.
425 * A more complicated solution would involve tracking vblanks
426 * following the termination of the page-flipping sequence
427 * and indeed performing the enable as a co-routine and not
428 * waiting synchronously upon the vblank.
430 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
432 schedule_delayed_work(&work
->work
, msecs_to_jiffies(50));
435 static void __intel_fbc_disable(struct drm_i915_private
*dev_priv
)
437 WARN_ON(!mutex_is_locked(&dev_priv
->fbc
.lock
));
439 intel_fbc_cancel_work(dev_priv
);
441 dev_priv
->fbc
.disable_fbc(dev_priv
);
442 dev_priv
->fbc
.crtc
= NULL
;
446 * intel_fbc_disable - disable FBC
447 * @dev_priv: i915 device instance
449 * This function disables FBC.
451 void intel_fbc_disable(struct drm_i915_private
*dev_priv
)
453 if (!fbc_supported(dev_priv
))
456 mutex_lock(&dev_priv
->fbc
.lock
);
457 __intel_fbc_disable(dev_priv
);
458 mutex_unlock(&dev_priv
->fbc
.lock
);
462 * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
465 * This function disables FBC if it's associated with the provided CRTC.
467 void intel_fbc_disable_crtc(struct intel_crtc
*crtc
)
469 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
471 if (!fbc_supported(dev_priv
))
474 mutex_lock(&dev_priv
->fbc
.lock
);
475 if (dev_priv
->fbc
.crtc
== crtc
)
476 __intel_fbc_disable(dev_priv
);
477 mutex_unlock(&dev_priv
->fbc
.lock
);
480 static void set_no_fbc_reason(struct drm_i915_private
*dev_priv
,
483 if (dev_priv
->fbc
.no_fbc_reason
== reason
)
486 dev_priv
->fbc
.no_fbc_reason
= reason
;
487 DRM_DEBUG_KMS("Disabling FBC: %s\n", reason
);
490 static struct drm_crtc
*intel_fbc_find_crtc(struct drm_i915_private
*dev_priv
)
492 struct drm_crtc
*crtc
= NULL
, *tmp_crtc
;
495 for_each_pipe(dev_priv
, pipe
) {
496 tmp_crtc
= dev_priv
->pipe_to_crtc_mapping
[pipe
];
498 if (intel_crtc_active(tmp_crtc
) &&
499 to_intel_plane_state(tmp_crtc
->primary
->state
)->visible
)
502 if (fbc_on_pipe_a_only(dev_priv
))
506 if (!crtc
|| crtc
->primary
->fb
== NULL
)
512 static bool multiple_pipes_ok(struct drm_i915_private
*dev_priv
)
516 struct drm_crtc
*crtc
;
518 if (INTEL_INFO(dev_priv
)->gen
> 4)
521 for_each_pipe(dev_priv
, pipe
) {
522 crtc
= dev_priv
->pipe_to_crtc_mapping
[pipe
];
524 if (intel_crtc_active(crtc
) &&
525 to_intel_plane_state(crtc
->primary
->state
)->visible
)
529 return (n_pipes
< 2);
532 static int find_compression_threshold(struct drm_i915_private
*dev_priv
,
533 struct drm_mm_node
*node
,
537 int compression_threshold
= 1;
541 /* The FBC hardware for BDW/SKL doesn't have access to the stolen
542 * reserved range size, so it always assumes the maximum (8mb) is used.
543 * If we enable FBC using a CFB on that memory range we'll get FIFO
544 * underruns, even if that range is not reserved by the BIOS. */
545 if (IS_BROADWELL(dev_priv
) ||
546 IS_SKYLAKE(dev_priv
) || IS_KABYLAKE(dev_priv
))
547 end
= dev_priv
->gtt
.stolen_size
- 8 * 1024 * 1024;
549 end
= dev_priv
->gtt
.stolen_usable_size
;
551 /* HACK: This code depends on what we will do in *_enable_fbc. If that
552 * code changes, this code needs to change as well.
554 * The enable_fbc code will attempt to use one of our 2 compression
555 * thresholds, therefore, in that case, we only have 1 resort.
558 /* Try to over-allocate to reduce reallocations and fragmentation. */
559 ret
= i915_gem_stolen_insert_node_in_range(dev_priv
, node
, size
<<= 1,
562 return compression_threshold
;
565 /* HW's ability to limit the CFB is 1:4 */
566 if (compression_threshold
> 4 ||
567 (fb_cpp
== 2 && compression_threshold
== 2))
570 ret
= i915_gem_stolen_insert_node_in_range(dev_priv
, node
, size
>>= 1,
572 if (ret
&& INTEL_INFO(dev_priv
)->gen
<= 4) {
575 compression_threshold
<<= 1;
578 return compression_threshold
;
582 static int intel_fbc_alloc_cfb(struct drm_i915_private
*dev_priv
, int size
,
585 struct drm_mm_node
*uninitialized_var(compressed_llb
);
588 ret
= find_compression_threshold(dev_priv
, &dev_priv
->fbc
.compressed_fb
,
593 DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
597 dev_priv
->fbc
.threshold
= ret
;
599 if (INTEL_INFO(dev_priv
)->gen
>= 5)
600 I915_WRITE(ILK_DPFC_CB_BASE
, dev_priv
->fbc
.compressed_fb
.start
);
601 else if (IS_GM45(dev_priv
)) {
602 I915_WRITE(DPFC_CB_BASE
, dev_priv
->fbc
.compressed_fb
.start
);
604 compressed_llb
= kzalloc(sizeof(*compressed_llb
), GFP_KERNEL
);
608 ret
= i915_gem_stolen_insert_node(dev_priv
, compressed_llb
,
613 dev_priv
->fbc
.compressed_llb
= compressed_llb
;
615 I915_WRITE(FBC_CFB_BASE
,
616 dev_priv
->mm
.stolen_base
+ dev_priv
->fbc
.compressed_fb
.start
);
617 I915_WRITE(FBC_LL_BASE
,
618 dev_priv
->mm
.stolen_base
+ compressed_llb
->start
);
621 dev_priv
->fbc
.uncompressed_size
= size
;
623 DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
624 dev_priv
->fbc
.compressed_fb
.size
,
625 dev_priv
->fbc
.threshold
);
630 kfree(compressed_llb
);
631 i915_gem_stolen_remove_node(dev_priv
, &dev_priv
->fbc
.compressed_fb
);
633 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size
);
637 static void __intel_fbc_cleanup_cfb(struct drm_i915_private
*dev_priv
)
639 if (dev_priv
->fbc
.uncompressed_size
== 0)
642 i915_gem_stolen_remove_node(dev_priv
, &dev_priv
->fbc
.compressed_fb
);
644 if (dev_priv
->fbc
.compressed_llb
) {
645 i915_gem_stolen_remove_node(dev_priv
,
646 dev_priv
->fbc
.compressed_llb
);
647 kfree(dev_priv
->fbc
.compressed_llb
);
650 dev_priv
->fbc
.uncompressed_size
= 0;
653 void intel_fbc_cleanup_cfb(struct drm_i915_private
*dev_priv
)
655 if (!fbc_supported(dev_priv
))
658 mutex_lock(&dev_priv
->fbc
.lock
);
659 __intel_fbc_cleanup_cfb(dev_priv
);
660 mutex_unlock(&dev_priv
->fbc
.lock
);
664 * For SKL+, the plane source size used by the hardware is based on the value we
665 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
666 * we wrote to PIPESRC.
668 static void intel_fbc_get_plane_source_size(struct intel_crtc
*crtc
,
669 int *width
, int *height
)
671 struct intel_plane_state
*plane_state
=
672 to_intel_plane_state(crtc
->base
.primary
->state
);
675 if (intel_rotation_90_or_270(plane_state
->base
.rotation
)) {
676 w
= drm_rect_height(&plane_state
->src
) >> 16;
677 h
= drm_rect_width(&plane_state
->src
) >> 16;
679 w
= drm_rect_width(&plane_state
->src
) >> 16;
680 h
= drm_rect_height(&plane_state
->src
) >> 16;
689 static int intel_fbc_calculate_cfb_size(struct intel_crtc
*crtc
)
691 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
692 struct drm_framebuffer
*fb
= crtc
->base
.primary
->fb
;
695 intel_fbc_get_plane_source_size(crtc
, NULL
, &lines
);
696 if (INTEL_INFO(dev_priv
)->gen
>= 7)
697 lines
= min(lines
, 2048);
699 return lines
* fb
->pitches
[0];
702 static int intel_fbc_setup_cfb(struct intel_crtc
*crtc
)
704 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
705 struct drm_framebuffer
*fb
= crtc
->base
.primary
->fb
;
708 size
= intel_fbc_calculate_cfb_size(crtc
);
709 cpp
= drm_format_plane_cpp(fb
->pixel_format
, 0);
711 if (size
<= dev_priv
->fbc
.uncompressed_size
)
714 /* Release any current block */
715 __intel_fbc_cleanup_cfb(dev_priv
);
717 return intel_fbc_alloc_cfb(dev_priv
, size
, cpp
);
720 static bool stride_is_valid(struct drm_i915_private
*dev_priv
,
723 /* These should have been caught earlier. */
724 WARN_ON(stride
< 512);
725 WARN_ON((stride
& (64 - 1)) != 0);
727 /* Below are the additional FBC restrictions. */
729 if (IS_GEN2(dev_priv
) || IS_GEN3(dev_priv
))
730 return stride
== 4096 || stride
== 8192;
732 if (IS_GEN4(dev_priv
) && !IS_G4X(dev_priv
) && stride
< 2048)
741 static bool pixel_format_is_valid(struct drm_framebuffer
*fb
)
743 struct drm_device
*dev
= fb
->dev
;
744 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
746 switch (fb
->pixel_format
) {
747 case DRM_FORMAT_XRGB8888
:
748 case DRM_FORMAT_XBGR8888
:
750 case DRM_FORMAT_XRGB1555
:
751 case DRM_FORMAT_RGB565
:
752 /* 16bpp not supported on gen2 */
755 /* WaFbcOnly1to1Ratio:ctg */
756 if (IS_G4X(dev_priv
))
765 * For some reason, the hardware tracking starts looking at whatever we
766 * programmed as the display plane base address register. It does not look at
767 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
768 * variables instead of just looking at the pipe/plane size.
770 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc
*crtc
)
772 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
773 unsigned int effective_w
, effective_h
, max_w
, max_h
;
775 if (INTEL_INFO(dev_priv
)->gen
>= 8 || IS_HASWELL(dev_priv
)) {
778 } else if (IS_G4X(dev_priv
) || INTEL_INFO(dev_priv
)->gen
>= 5) {
786 intel_fbc_get_plane_source_size(crtc
, &effective_w
, &effective_h
);
787 effective_w
+= crtc
->adjusted_x
;
788 effective_h
+= crtc
->adjusted_y
;
790 return effective_w
<= max_w
&& effective_h
<= max_h
;
794 * __intel_fbc_update - enable/disable FBC as needed, unlocked
795 * @dev_priv: i915 device instance
797 * Set up the framebuffer compression hardware at mode set time. We
798 * enable it if possible:
799 * - plane A only (on pre-965)
800 * - no pixel mulitply/line duplication
801 * - no alpha buffer discard
803 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
805 * We can't assume that any compression will take place (worst case),
806 * so the compressed buffer has to be the same size as the uncompressed
807 * one. It also must reside (along with the line length buffer) in
810 * We need to enable/disable FBC on a global basis.
812 static void __intel_fbc_update(struct drm_i915_private
*dev_priv
)
814 struct drm_crtc
*crtc
= NULL
;
815 struct intel_crtc
*intel_crtc
;
816 struct drm_framebuffer
*fb
;
817 struct drm_i915_gem_object
*obj
;
818 const struct drm_display_mode
*adjusted_mode
;
820 WARN_ON(!mutex_is_locked(&dev_priv
->fbc
.lock
));
822 /* disable framebuffer compression in vGPU */
823 if (intel_vgpu_active(dev_priv
->dev
))
826 if (i915
.enable_fbc
< 0) {
827 set_no_fbc_reason(dev_priv
, "disabled per chip default");
831 if (!i915
.enable_fbc
) {
832 set_no_fbc_reason(dev_priv
, "disabled per module param");
837 * If FBC is already on, we just have to verify that we can
838 * keep it that way...
839 * Need to disable if:
840 * - more than one pipe is active
841 * - changing FBC params (stride, fence, mode)
842 * - new fb is too large to fit in compressed buffer
843 * - going to an unsupported config (interlace, pixel multiply, etc.)
845 crtc
= intel_fbc_find_crtc(dev_priv
);
847 set_no_fbc_reason(dev_priv
, "no output");
851 if (!multiple_pipes_ok(dev_priv
)) {
852 set_no_fbc_reason(dev_priv
, "more than one pipe active");
856 intel_crtc
= to_intel_crtc(crtc
);
857 fb
= crtc
->primary
->fb
;
858 obj
= intel_fb_obj(fb
);
859 adjusted_mode
= &intel_crtc
->config
->base
.adjusted_mode
;
861 if ((adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
862 (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLSCAN
)) {
863 set_no_fbc_reason(dev_priv
, "incompatible mode");
867 if (!intel_fbc_hw_tracking_covers_screen(intel_crtc
)) {
868 set_no_fbc_reason(dev_priv
, "mode too large for compression");
872 if ((INTEL_INFO(dev_priv
)->gen
< 4 || HAS_DDI(dev_priv
)) &&
873 intel_crtc
->plane
!= PLANE_A
) {
874 set_no_fbc_reason(dev_priv
, "FBC unsupported on plane");
878 /* The use of a CPU fence is mandatory in order to detect writes
879 * by the CPU to the scanout and trigger updates to the FBC.
881 if (obj
->tiling_mode
!= I915_TILING_X
||
882 obj
->fence_reg
== I915_FENCE_REG_NONE
) {
883 set_no_fbc_reason(dev_priv
, "framebuffer not tiled or fenced");
886 if (INTEL_INFO(dev_priv
)->gen
<= 4 && !IS_G4X(dev_priv
) &&
887 crtc
->primary
->state
->rotation
!= BIT(DRM_ROTATE_0
)) {
888 set_no_fbc_reason(dev_priv
, "rotation unsupported");
892 if (!stride_is_valid(dev_priv
, fb
->pitches
[0])) {
893 set_no_fbc_reason(dev_priv
, "framebuffer stride not supported");
897 if (!pixel_format_is_valid(fb
)) {
898 set_no_fbc_reason(dev_priv
, "pixel format is invalid");
902 /* If the kernel debugger is active, always disable compression */
903 if (in_dbg_master()) {
904 set_no_fbc_reason(dev_priv
, "Kernel debugger is active");
908 /* WaFbcExceedCdClockThreshold:hsw,bdw */
909 if ((IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) &&
910 ilk_pipe_pixel_rate(intel_crtc
->config
) >=
911 dev_priv
->cdclk_freq
* 95 / 100) {
912 set_no_fbc_reason(dev_priv
, "pixel rate is too big");
916 if (intel_fbc_setup_cfb(intel_crtc
)) {
917 set_no_fbc_reason(dev_priv
, "not enough stolen memory");
921 /* If the scanout has not changed, don't modify the FBC settings.
922 * Note that we make the fundamental assumption that the fb->obj
923 * cannot be unpinned (and have its GTT offset and fence revoked)
924 * without first being decoupled from the scanout and FBC disabled.
926 if (dev_priv
->fbc
.crtc
== intel_crtc
&&
927 dev_priv
->fbc
.fb_id
== fb
->base
.id
&&
928 dev_priv
->fbc
.y
== crtc
->y
)
931 if (intel_fbc_enabled(dev_priv
)) {
932 /* We update FBC along two paths, after changing fb/crtc
933 * configuration (modeswitching) and after page-flipping
934 * finishes. For the latter, we know that not only did
935 * we disable the FBC at the start of the page-flip
936 * sequence, but also more than one vblank has passed.
938 * For the former case of modeswitching, it is possible
939 * to switch between two FBC valid configurations
940 * instantaneously so we do need to disable the FBC
941 * before we can modify its control registers. We also
942 * have to wait for the next vblank for that to take
943 * effect. However, since we delay enabling FBC we can
944 * assume that a vblank has passed since disabling and
945 * that we can safely alter the registers in the deferred
948 * In the scenario that we go from a valid to invalid
949 * and then back to valid FBC configuration we have
950 * no strict enforcement that a vblank occurred since
951 * disabling the FBC. However, along all current pipe
952 * disabling paths we do need to wait for a vblank at
953 * some point. And we wait before enabling FBC anyway.
955 DRM_DEBUG_KMS("disabling active FBC for update\n");
956 __intel_fbc_disable(dev_priv
);
959 intel_fbc_schedule_enable(intel_crtc
);
960 dev_priv
->fbc
.no_fbc_reason
= "FBC enabled (not necessarily active)";
964 /* Multiple disables should be harmless */
965 if (intel_fbc_enabled(dev_priv
)) {
966 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
967 __intel_fbc_disable(dev_priv
);
969 __intel_fbc_cleanup_cfb(dev_priv
);
973 * intel_fbc_update - enable/disable FBC as needed
974 * @dev_priv: i915 device instance
976 * This function reevaluates the overall state and enables or disables FBC.
978 void intel_fbc_update(struct drm_i915_private
*dev_priv
)
980 if (!fbc_supported(dev_priv
))
983 mutex_lock(&dev_priv
->fbc
.lock
);
984 __intel_fbc_update(dev_priv
);
985 mutex_unlock(&dev_priv
->fbc
.lock
);
988 void intel_fbc_invalidate(struct drm_i915_private
*dev_priv
,
989 unsigned int frontbuffer_bits
,
990 enum fb_op_origin origin
)
992 unsigned int fbc_bits
;
994 if (!fbc_supported(dev_priv
))
997 if (origin
== ORIGIN_GTT
)
1000 mutex_lock(&dev_priv
->fbc
.lock
);
1002 if (dev_priv
->fbc
.enabled
)
1003 fbc_bits
= INTEL_FRONTBUFFER_PRIMARY(dev_priv
->fbc
.crtc
->pipe
);
1004 else if (dev_priv
->fbc
.fbc_work
)
1005 fbc_bits
= INTEL_FRONTBUFFER_PRIMARY(
1006 dev_priv
->fbc
.fbc_work
->crtc
->pipe
);
1008 fbc_bits
= dev_priv
->fbc
.possible_framebuffer_bits
;
1010 dev_priv
->fbc
.busy_bits
|= (fbc_bits
& frontbuffer_bits
);
1012 if (dev_priv
->fbc
.busy_bits
)
1013 __intel_fbc_disable(dev_priv
);
1015 mutex_unlock(&dev_priv
->fbc
.lock
);
1018 void intel_fbc_flush(struct drm_i915_private
*dev_priv
,
1019 unsigned int frontbuffer_bits
, enum fb_op_origin origin
)
1021 if (!fbc_supported(dev_priv
))
1024 if (origin
== ORIGIN_GTT
)
1027 mutex_lock(&dev_priv
->fbc
.lock
);
1029 dev_priv
->fbc
.busy_bits
&= ~frontbuffer_bits
;
1031 if (!dev_priv
->fbc
.busy_bits
) {
1032 __intel_fbc_disable(dev_priv
);
1033 __intel_fbc_update(dev_priv
);
1036 mutex_unlock(&dev_priv
->fbc
.lock
);
1040 * intel_fbc_init - Initialize FBC
1041 * @dev_priv: the i915 device
1043 * This function might be called during PM init process.
1045 void intel_fbc_init(struct drm_i915_private
*dev_priv
)
1049 mutex_init(&dev_priv
->fbc
.lock
);
1051 if (!HAS_FBC(dev_priv
)) {
1052 dev_priv
->fbc
.enabled
= false;
1053 dev_priv
->fbc
.no_fbc_reason
= "unsupported by this chipset";
1057 for_each_pipe(dev_priv
, pipe
) {
1058 dev_priv
->fbc
.possible_framebuffer_bits
|=
1059 INTEL_FRONTBUFFER_PRIMARY(pipe
);
1061 if (fbc_on_pipe_a_only(dev_priv
))
1065 if (INTEL_INFO(dev_priv
)->gen
>= 7) {
1066 dev_priv
->fbc
.fbc_enabled
= ilk_fbc_enabled
;
1067 dev_priv
->fbc
.enable_fbc
= gen7_fbc_enable
;
1068 dev_priv
->fbc
.disable_fbc
= ilk_fbc_disable
;
1069 } else if (INTEL_INFO(dev_priv
)->gen
>= 5) {
1070 dev_priv
->fbc
.fbc_enabled
= ilk_fbc_enabled
;
1071 dev_priv
->fbc
.enable_fbc
= ilk_fbc_enable
;
1072 dev_priv
->fbc
.disable_fbc
= ilk_fbc_disable
;
1073 } else if (IS_GM45(dev_priv
)) {
1074 dev_priv
->fbc
.fbc_enabled
= g4x_fbc_enabled
;
1075 dev_priv
->fbc
.enable_fbc
= g4x_fbc_enable
;
1076 dev_priv
->fbc
.disable_fbc
= g4x_fbc_disable
;
1078 dev_priv
->fbc
.fbc_enabled
= i8xx_fbc_enabled
;
1079 dev_priv
->fbc
.enable_fbc
= i8xx_fbc_enable
;
1080 dev_priv
->fbc
.disable_fbc
= i8xx_fbc_disable
;
1082 /* This value was pulled out of someone's hat */
1083 I915_WRITE(FBC_CONTROL
, 500 << FBC_CTL_INTERVAL_SHIFT
);
1086 dev_priv
->fbc
.enabled
= dev_priv
->fbc
.fbc_enabled(dev_priv
);