2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * DOC: Frame Buffer Compression (FBC)
27 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
31 * The benefits of FBC are mostly visible with solid backgrounds and
32 * variation-less patterns. It comes from keeping the memory footprint small
33 * and having fewer memory pages opened and accessed for refreshing the display.
35 * i915 is responsible to reserve stolen memory for FBC and configure its
36 * offset on proper registers. The hardware takes care of all
37 * compress/decompress. However there are many known cases where we have to
38 * forcibly disable it to allow proper screen updates.
41 #include "intel_drv.h"
44 static void i8xx_fbc_disable(struct drm_device
*dev
)
46 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
49 dev_priv
->fbc
.enabled
= false;
51 /* Disable compression */
52 fbc_ctl
= I915_READ(FBC_CONTROL
);
53 if ((fbc_ctl
& FBC_CTL_EN
) == 0)
56 fbc_ctl
&= ~FBC_CTL_EN
;
57 I915_WRITE(FBC_CONTROL
, fbc_ctl
);
59 /* Wait for compressing bit to clear */
60 if (wait_for((I915_READ(FBC_STATUS
) & FBC_STAT_COMPRESSING
) == 0, 10)) {
61 DRM_DEBUG_KMS("FBC idle timed out\n");
65 DRM_DEBUG_KMS("disabled FBC\n");
68 static void i8xx_fbc_enable(struct drm_crtc
*crtc
)
70 struct drm_device
*dev
= crtc
->dev
;
71 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
72 struct drm_framebuffer
*fb
= crtc
->primary
->fb
;
73 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
74 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
79 dev_priv
->fbc
.enabled
= true;
81 /* Note: fbc.threshold == 1 for i8xx */
82 cfb_pitch
= dev_priv
->fbc
.uncompressed_size
/ FBC_LL_SIZE
;
83 if (fb
->pitches
[0] < cfb_pitch
)
84 cfb_pitch
= fb
->pitches
[0];
86 /* FBC_CTL wants 32B or 64B units */
88 cfb_pitch
= (cfb_pitch
/ 32) - 1;
90 cfb_pitch
= (cfb_pitch
/ 64) - 1;
93 for (i
= 0; i
< (FBC_LL_SIZE
/ 32) + 1; i
++)
94 I915_WRITE(FBC_TAG
+ (i
* 4), 0);
100 fbc_ctl2
= FBC_CTL_FENCE_DBL
| FBC_CTL_IDLE_IMM
| FBC_CTL_CPU_FENCE
;
101 fbc_ctl2
|= FBC_CTL_PLANE(intel_crtc
->plane
);
102 I915_WRITE(FBC_CONTROL2
, fbc_ctl2
);
103 I915_WRITE(FBC_FENCE_OFF
, crtc
->y
);
107 fbc_ctl
= I915_READ(FBC_CONTROL
);
108 fbc_ctl
&= 0x3fff << FBC_CTL_INTERVAL_SHIFT
;
109 fbc_ctl
|= FBC_CTL_EN
| FBC_CTL_PERIODIC
;
111 fbc_ctl
|= FBC_CTL_C3_IDLE
; /* 945 needs special SR handling */
112 fbc_ctl
|= (cfb_pitch
& 0xff) << FBC_CTL_STRIDE_SHIFT
;
113 fbc_ctl
|= obj
->fence_reg
;
114 I915_WRITE(FBC_CONTROL
, fbc_ctl
);
116 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
117 cfb_pitch
, crtc
->y
, plane_name(intel_crtc
->plane
));
120 static bool i8xx_fbc_enabled(struct drm_device
*dev
)
122 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
124 return I915_READ(FBC_CONTROL
) & FBC_CTL_EN
;
127 static void g4x_fbc_enable(struct drm_crtc
*crtc
)
129 struct drm_device
*dev
= crtc
->dev
;
130 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
131 struct drm_framebuffer
*fb
= crtc
->primary
->fb
;
132 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
133 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
136 dev_priv
->fbc
.enabled
= true;
138 dpfc_ctl
= DPFC_CTL_PLANE(intel_crtc
->plane
) | DPFC_SR_EN
;
139 if (drm_format_plane_cpp(fb
->pixel_format
, 0) == 2)
140 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
142 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
143 dpfc_ctl
|= DPFC_CTL_FENCE_EN
| obj
->fence_reg
;
145 I915_WRITE(DPFC_FENCE_YOFF
, crtc
->y
);
148 I915_WRITE(DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
150 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc
->plane
));
153 static void g4x_fbc_disable(struct drm_device
*dev
)
155 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
158 dev_priv
->fbc
.enabled
= false;
160 /* Disable compression */
161 dpfc_ctl
= I915_READ(DPFC_CONTROL
);
162 if (dpfc_ctl
& DPFC_CTL_EN
) {
163 dpfc_ctl
&= ~DPFC_CTL_EN
;
164 I915_WRITE(DPFC_CONTROL
, dpfc_ctl
);
166 DRM_DEBUG_KMS("disabled FBC\n");
170 static bool g4x_fbc_enabled(struct drm_device
*dev
)
172 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
174 return I915_READ(DPFC_CONTROL
) & DPFC_CTL_EN
;
177 static void intel_fbc_nuke(struct drm_i915_private
*dev_priv
)
179 I915_WRITE(MSG_FBC_REND_STATE
, FBC_REND_NUKE
);
180 POSTING_READ(MSG_FBC_REND_STATE
);
183 static void ilk_fbc_enable(struct drm_crtc
*crtc
)
185 struct drm_device
*dev
= crtc
->dev
;
186 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
187 struct drm_framebuffer
*fb
= crtc
->primary
->fb
;
188 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
189 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
191 int threshold
= dev_priv
->fbc
.threshold
;
193 dev_priv
->fbc
.enabled
= true;
195 dpfc_ctl
= DPFC_CTL_PLANE(intel_crtc
->plane
);
196 if (drm_format_plane_cpp(fb
->pixel_format
, 0) == 2)
202 dpfc_ctl
|= DPFC_CTL_LIMIT_4X
;
205 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
208 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
211 dpfc_ctl
|= DPFC_CTL_FENCE_EN
;
213 dpfc_ctl
|= obj
->fence_reg
;
215 I915_WRITE(ILK_DPFC_FENCE_YOFF
, crtc
->y
);
216 I915_WRITE(ILK_FBC_RT_BASE
, i915_gem_obj_ggtt_offset(obj
) | ILK_FBC_RT_VALID
);
218 I915_WRITE(ILK_DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
221 I915_WRITE(SNB_DPFC_CTL_SA
,
222 SNB_CPU_FENCE_ENABLE
| obj
->fence_reg
);
223 I915_WRITE(DPFC_CPU_FENCE_OFFSET
, crtc
->y
);
226 intel_fbc_nuke(dev_priv
);
228 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc
->plane
));
231 static void ilk_fbc_disable(struct drm_device
*dev
)
233 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
236 dev_priv
->fbc
.enabled
= false;
238 /* Disable compression */
239 dpfc_ctl
= I915_READ(ILK_DPFC_CONTROL
);
240 if (dpfc_ctl
& DPFC_CTL_EN
) {
241 dpfc_ctl
&= ~DPFC_CTL_EN
;
242 I915_WRITE(ILK_DPFC_CONTROL
, dpfc_ctl
);
244 DRM_DEBUG_KMS("disabled FBC\n");
248 static bool ilk_fbc_enabled(struct drm_device
*dev
)
250 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
252 return I915_READ(ILK_DPFC_CONTROL
) & DPFC_CTL_EN
;
255 static void gen7_fbc_enable(struct drm_crtc
*crtc
)
257 struct drm_device
*dev
= crtc
->dev
;
258 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
259 struct drm_framebuffer
*fb
= crtc
->primary
->fb
;
260 struct drm_i915_gem_object
*obj
= intel_fb_obj(fb
);
261 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
263 int threshold
= dev_priv
->fbc
.threshold
;
265 dev_priv
->fbc
.enabled
= true;
268 if (IS_IVYBRIDGE(dev
))
269 dpfc_ctl
|= IVB_DPFC_CTL_PLANE(intel_crtc
->plane
);
271 if (drm_format_plane_cpp(fb
->pixel_format
, 0) == 2)
277 dpfc_ctl
|= DPFC_CTL_LIMIT_4X
;
280 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
283 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
287 dpfc_ctl
|= IVB_DPFC_CTL_FENCE_EN
;
289 if (dev_priv
->fbc
.false_color
)
290 dpfc_ctl
|= FBC_CTL_FALSE_COLOR
;
292 I915_WRITE(ILK_DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
294 if (IS_IVYBRIDGE(dev
)) {
295 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
296 I915_WRITE(ILK_DISPLAY_CHICKEN1
,
297 I915_READ(ILK_DISPLAY_CHICKEN1
) |
300 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
301 I915_WRITE(CHICKEN_PIPESL_1(intel_crtc
->pipe
),
302 I915_READ(CHICKEN_PIPESL_1(intel_crtc
->pipe
)) |
306 I915_WRITE(SNB_DPFC_CTL_SA
,
307 SNB_CPU_FENCE_ENABLE
| obj
->fence_reg
);
308 I915_WRITE(DPFC_CPU_FENCE_OFFSET
, crtc
->y
);
310 intel_fbc_nuke(dev_priv
);
312 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc
->plane
));
316 * intel_fbc_enabled - Is FBC enabled?
317 * @dev: the drm_device
319 * This function is used to verify the current state of FBC.
320 * FIXME: This should be tracked in the plane config eventually
321 * instead of queried at runtime for most callers.
323 bool intel_fbc_enabled(struct drm_device
*dev
)
325 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
327 return dev_priv
->fbc
.enabled
;
330 static void intel_fbc_work_fn(struct work_struct
*__work
)
332 struct intel_fbc_work
*work
=
333 container_of(to_delayed_work(__work
),
334 struct intel_fbc_work
, work
);
335 struct drm_device
*dev
= work
->crtc
->dev
;
336 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
338 mutex_lock(&dev
->struct_mutex
);
339 if (work
== dev_priv
->fbc
.fbc_work
) {
340 /* Double check that we haven't switched fb without cancelling
343 if (work
->crtc
->primary
->fb
== work
->fb
) {
344 dev_priv
->display
.enable_fbc(work
->crtc
);
346 dev_priv
->fbc
.crtc
= to_intel_crtc(work
->crtc
);
347 dev_priv
->fbc
.fb_id
= work
->crtc
->primary
->fb
->base
.id
;
348 dev_priv
->fbc
.y
= work
->crtc
->y
;
351 dev_priv
->fbc
.fbc_work
= NULL
;
353 mutex_unlock(&dev
->struct_mutex
);
358 static void intel_fbc_cancel_work(struct drm_i915_private
*dev_priv
)
360 if (dev_priv
->fbc
.fbc_work
== NULL
)
363 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
365 /* Synchronisation is provided by struct_mutex and checking of
366 * dev_priv->fbc.fbc_work, so we can perform the cancellation
367 * entirely asynchronously.
369 if (cancel_delayed_work(&dev_priv
->fbc
.fbc_work
->work
))
370 /* tasklet was killed before being run, clean up */
371 kfree(dev_priv
->fbc
.fbc_work
);
373 /* Mark the work as no longer wanted so that if it does
374 * wake-up (because the work was already running and waiting
375 * for our mutex), it will discover that is no longer
378 dev_priv
->fbc
.fbc_work
= NULL
;
381 static void intel_fbc_enable(struct drm_crtc
*crtc
)
383 struct intel_fbc_work
*work
;
384 struct drm_device
*dev
= crtc
->dev
;
385 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
387 if (!dev_priv
->display
.enable_fbc
)
390 intel_fbc_cancel_work(dev_priv
);
392 work
= kzalloc(sizeof(*work
), GFP_KERNEL
);
394 DRM_ERROR("Failed to allocate FBC work structure\n");
395 dev_priv
->display
.enable_fbc(crtc
);
400 work
->fb
= crtc
->primary
->fb
;
401 INIT_DELAYED_WORK(&work
->work
, intel_fbc_work_fn
);
403 dev_priv
->fbc
.fbc_work
= work
;
405 /* Delay the actual enabling to let pageflipping cease and the
406 * display to settle before starting the compression. Note that
407 * this delay also serves a second purpose: it allows for a
408 * vblank to pass after disabling the FBC before we attempt
409 * to modify the control registers.
411 * A more complicated solution would involve tracking vblanks
412 * following the termination of the page-flipping sequence
413 * and indeed performing the enable as a co-routine and not
414 * waiting synchronously upon the vblank.
416 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
418 schedule_delayed_work(&work
->work
, msecs_to_jiffies(50));
422 * intel_fbc_disable - disable FBC
423 * @dev: the drm_device
425 * This function disables FBC.
427 void intel_fbc_disable(struct drm_device
*dev
)
429 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
431 intel_fbc_cancel_work(dev_priv
);
433 if (!dev_priv
->display
.disable_fbc
)
436 dev_priv
->display
.disable_fbc(dev
);
437 dev_priv
->fbc
.crtc
= NULL
;
440 const char *intel_no_fbc_reason_str(enum no_fbc_reason reason
)
444 return "FBC enabled but currently disabled in hardware";
445 case FBC_UNSUPPORTED
:
446 return "unsupported by this chipset";
449 case FBC_STOLEN_TOO_SMALL
:
450 return "not enough stolen memory";
451 case FBC_UNSUPPORTED_MODE
:
452 return "mode incompatible with compression";
453 case FBC_MODE_TOO_LARGE
:
454 return "mode too large for compression";
456 return "FBC unsupported on plane";
458 return "framebuffer not tiled or fenced";
459 case FBC_MULTIPLE_PIPES
:
460 return "more than one pipe active";
461 case FBC_MODULE_PARAM
:
462 return "disabled per module param";
463 case FBC_CHIP_DEFAULT
:
464 return "disabled per chip default";
466 return "rotation unsupported";
468 MISSING_CASE(reason
);
469 return "unknown reason";
473 static void set_no_fbc_reason(struct drm_i915_private
*dev_priv
,
474 enum no_fbc_reason reason
)
476 if (dev_priv
->fbc
.no_fbc_reason
== reason
)
479 dev_priv
->fbc
.no_fbc_reason
= reason
;
480 DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason
));
483 static struct drm_crtc
*intel_fbc_find_crtc(struct drm_i915_private
*dev_priv
)
485 struct drm_crtc
*crtc
= NULL
, *tmp_crtc
;
487 bool pipe_a_only
= false, one_pipe_only
= false;
489 if (IS_HASWELL(dev_priv
) || INTEL_INFO(dev_priv
)->gen
>= 8)
491 else if (INTEL_INFO(dev_priv
)->gen
<= 4)
492 one_pipe_only
= true;
494 for_each_pipe(dev_priv
, pipe
) {
495 tmp_crtc
= dev_priv
->pipe_to_crtc_mapping
[pipe
];
497 if (intel_crtc_active(tmp_crtc
) &&
498 to_intel_plane_state(tmp_crtc
->primary
->state
)->visible
) {
499 if (one_pipe_only
&& crtc
) {
500 set_no_fbc_reason(dev_priv
, FBC_MULTIPLE_PIPES
);
510 if (!crtc
|| crtc
->primary
->fb
== NULL
) {
511 set_no_fbc_reason(dev_priv
, FBC_NO_OUTPUT
);
518 static int find_compression_threshold(struct drm_device
*dev
,
519 struct drm_mm_node
*node
,
523 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
524 int compression_threshold
= 1;
527 /* HACK: This code depends on what we will do in *_enable_fbc. If that
528 * code changes, this code needs to change as well.
530 * The enable_fbc code will attempt to use one of our 2 compression
531 * thresholds, therefore, in that case, we only have 1 resort.
534 /* Try to over-allocate to reduce reallocations and fragmentation. */
535 ret
= i915_gem_stolen_insert_node(dev_priv
, node
, size
<<= 1, 4096);
537 return compression_threshold
;
540 /* HW's ability to limit the CFB is 1:4 */
541 if (compression_threshold
> 4 ||
542 (fb_cpp
== 2 && compression_threshold
== 2))
545 ret
= i915_gem_stolen_insert_node(dev_priv
, node
, size
>>= 1, 4096);
546 if (ret
&& INTEL_INFO(dev
)->gen
<= 4) {
549 compression_threshold
<<= 1;
552 return compression_threshold
;
556 static int intel_fbc_alloc_cfb(struct drm_device
*dev
, int size
, int fb_cpp
)
558 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
559 struct drm_mm_node
*uninitialized_var(compressed_llb
);
562 ret
= find_compression_threshold(dev
, &dev_priv
->fbc
.compressed_fb
,
567 DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
571 dev_priv
->fbc
.threshold
= ret
;
573 if (INTEL_INFO(dev_priv
)->gen
>= 5)
574 I915_WRITE(ILK_DPFC_CB_BASE
, dev_priv
->fbc
.compressed_fb
.start
);
575 else if (IS_GM45(dev
)) {
576 I915_WRITE(DPFC_CB_BASE
, dev_priv
->fbc
.compressed_fb
.start
);
578 compressed_llb
= kzalloc(sizeof(*compressed_llb
), GFP_KERNEL
);
582 ret
= i915_gem_stolen_insert_node(dev_priv
, compressed_llb
,
587 dev_priv
->fbc
.compressed_llb
= compressed_llb
;
589 I915_WRITE(FBC_CFB_BASE
,
590 dev_priv
->mm
.stolen_base
+ dev_priv
->fbc
.compressed_fb
.start
);
591 I915_WRITE(FBC_LL_BASE
,
592 dev_priv
->mm
.stolen_base
+ compressed_llb
->start
);
595 dev_priv
->fbc
.uncompressed_size
= size
;
597 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
603 kfree(compressed_llb
);
604 i915_gem_stolen_remove_node(dev_priv
, &dev_priv
->fbc
.compressed_fb
);
606 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size
);
610 void intel_fbc_cleanup_cfb(struct drm_device
*dev
)
612 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
614 if (dev_priv
->fbc
.uncompressed_size
== 0)
617 i915_gem_stolen_remove_node(dev_priv
, &dev_priv
->fbc
.compressed_fb
);
619 if (dev_priv
->fbc
.compressed_llb
) {
620 i915_gem_stolen_remove_node(dev_priv
,
621 dev_priv
->fbc
.compressed_llb
);
622 kfree(dev_priv
->fbc
.compressed_llb
);
625 dev_priv
->fbc
.uncompressed_size
= 0;
628 static int intel_fbc_setup_cfb(struct drm_device
*dev
, int size
, int fb_cpp
)
630 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
632 if (size
<= dev_priv
->fbc
.uncompressed_size
)
635 /* Release any current block */
636 intel_fbc_cleanup_cfb(dev
);
638 return intel_fbc_alloc_cfb(dev
, size
, fb_cpp
);
642 * intel_fbc_update - enable/disable FBC as needed
643 * @dev: the drm_device
645 * Set up the framebuffer compression hardware at mode set time. We
646 * enable it if possible:
647 * - plane A only (on pre-965)
648 * - no pixel mulitply/line duplication
649 * - no alpha buffer discard
651 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
653 * We can't assume that any compression will take place (worst case),
654 * so the compressed buffer has to be the same size as the uncompressed
655 * one. It also must reside (along with the line length buffer) in
658 * We need to enable/disable FBC on a global basis.
660 void intel_fbc_update(struct drm_device
*dev
)
662 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
663 struct drm_crtc
*crtc
= NULL
;
664 struct intel_crtc
*intel_crtc
;
665 struct drm_framebuffer
*fb
;
666 struct drm_i915_gem_object
*obj
;
667 const struct drm_display_mode
*adjusted_mode
;
668 unsigned int max_width
, max_height
;
673 /* disable framebuffer compression in vGPU */
674 if (intel_vgpu_active(dev
))
677 if (i915
.enable_fbc
< 0) {
678 set_no_fbc_reason(dev_priv
, FBC_CHIP_DEFAULT
);
682 if (!i915
.enable_fbc
) {
683 set_no_fbc_reason(dev_priv
, FBC_MODULE_PARAM
);
688 * If FBC is already on, we just have to verify that we can
689 * keep it that way...
690 * Need to disable if:
691 * - more than one pipe is active
692 * - changing FBC params (stride, fence, mode)
693 * - new fb is too large to fit in compressed buffer
694 * - going to an unsupported config (interlace, pixel multiply, etc.)
696 crtc
= intel_fbc_find_crtc(dev_priv
);
700 intel_crtc
= to_intel_crtc(crtc
);
701 fb
= crtc
->primary
->fb
;
702 obj
= intel_fb_obj(fb
);
703 adjusted_mode
= &intel_crtc
->config
->base
.adjusted_mode
;
705 if ((adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
706 (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLSCAN
)) {
707 set_no_fbc_reason(dev_priv
, FBC_UNSUPPORTED_MODE
);
711 if (INTEL_INFO(dev
)->gen
>= 8 || IS_HASWELL(dev
)) {
714 } else if (IS_G4X(dev
) || INTEL_INFO(dev
)->gen
>= 5) {
721 if (intel_crtc
->config
->pipe_src_w
> max_width
||
722 intel_crtc
->config
->pipe_src_h
> max_height
) {
723 set_no_fbc_reason(dev_priv
, FBC_MODE_TOO_LARGE
);
726 if ((INTEL_INFO(dev
)->gen
< 4 || HAS_DDI(dev
)) &&
727 intel_crtc
->plane
!= PLANE_A
) {
728 set_no_fbc_reason(dev_priv
, FBC_BAD_PLANE
);
732 /* The use of a CPU fence is mandatory in order to detect writes
733 * by the CPU to the scanout and trigger updates to the FBC.
735 if (obj
->tiling_mode
!= I915_TILING_X
||
736 obj
->fence_reg
== I915_FENCE_REG_NONE
) {
737 set_no_fbc_reason(dev_priv
, FBC_NOT_TILED
);
740 if (INTEL_INFO(dev
)->gen
<= 4 && !IS_G4X(dev
) &&
741 crtc
->primary
->state
->rotation
!= BIT(DRM_ROTATE_0
)) {
742 set_no_fbc_reason(dev_priv
, FBC_ROTATION
);
746 /* If the kernel debugger is active, always disable compression */
750 if (intel_fbc_setup_cfb(dev
, obj
->base
.size
,
751 drm_format_plane_cpp(fb
->pixel_format
, 0))) {
752 set_no_fbc_reason(dev_priv
, FBC_STOLEN_TOO_SMALL
);
756 /* If the scanout has not changed, don't modify the FBC settings.
757 * Note that we make the fundamental assumption that the fb->obj
758 * cannot be unpinned (and have its GTT offset and fence revoked)
759 * without first being decoupled from the scanout and FBC disabled.
761 if (dev_priv
->fbc
.crtc
== intel_crtc
&&
762 dev_priv
->fbc
.fb_id
== fb
->base
.id
&&
763 dev_priv
->fbc
.y
== crtc
->y
)
766 if (intel_fbc_enabled(dev
)) {
767 /* We update FBC along two paths, after changing fb/crtc
768 * configuration (modeswitching) and after page-flipping
769 * finishes. For the latter, we know that not only did
770 * we disable the FBC at the start of the page-flip
771 * sequence, but also more than one vblank has passed.
773 * For the former case of modeswitching, it is possible
774 * to switch between two FBC valid configurations
775 * instantaneously so we do need to disable the FBC
776 * before we can modify its control registers. We also
777 * have to wait for the next vblank for that to take
778 * effect. However, since we delay enabling FBC we can
779 * assume that a vblank has passed since disabling and
780 * that we can safely alter the registers in the deferred
783 * In the scenario that we go from a valid to invalid
784 * and then back to valid FBC configuration we have
785 * no strict enforcement that a vblank occurred since
786 * disabling the FBC. However, along all current pipe
787 * disabling paths we do need to wait for a vblank at
788 * some point. And we wait before enabling FBC anyway.
790 DRM_DEBUG_KMS("disabling active FBC for update\n");
791 intel_fbc_disable(dev
);
794 intel_fbc_enable(crtc
);
795 dev_priv
->fbc
.no_fbc_reason
= FBC_OK
;
799 /* Multiple disables should be harmless */
800 if (intel_fbc_enabled(dev
)) {
801 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
802 intel_fbc_disable(dev
);
804 intel_fbc_cleanup_cfb(dev
);
807 void intel_fbc_invalidate(struct drm_i915_private
*dev_priv
,
808 unsigned int frontbuffer_bits
,
809 enum fb_op_origin origin
)
811 struct drm_device
*dev
= dev_priv
->dev
;
812 unsigned int fbc_bits
;
814 if (origin
== ORIGIN_GTT
)
817 if (dev_priv
->fbc
.enabled
)
818 fbc_bits
= INTEL_FRONTBUFFER_PRIMARY(dev_priv
->fbc
.crtc
->pipe
);
819 else if (dev_priv
->fbc
.fbc_work
)
820 fbc_bits
= INTEL_FRONTBUFFER_PRIMARY(
821 to_intel_crtc(dev_priv
->fbc
.fbc_work
->crtc
)->pipe
);
823 fbc_bits
= dev_priv
->fbc
.possible_framebuffer_bits
;
825 dev_priv
->fbc
.busy_bits
|= (fbc_bits
& frontbuffer_bits
);
827 if (dev_priv
->fbc
.busy_bits
)
828 intel_fbc_disable(dev
);
831 void intel_fbc_flush(struct drm_i915_private
*dev_priv
,
832 unsigned int frontbuffer_bits
)
834 struct drm_device
*dev
= dev_priv
->dev
;
836 if (!dev_priv
->fbc
.busy_bits
)
839 dev_priv
->fbc
.busy_bits
&= ~frontbuffer_bits
;
841 if (!dev_priv
->fbc
.busy_bits
)
842 intel_fbc_update(dev
);
846 * intel_fbc_init - Initialize FBC
847 * @dev_priv: the i915 device
849 * This function might be called during PM init process.
851 void intel_fbc_init(struct drm_i915_private
*dev_priv
)
855 if (!HAS_FBC(dev_priv
)) {
856 dev_priv
->fbc
.enabled
= false;
857 dev_priv
->fbc
.no_fbc_reason
= FBC_UNSUPPORTED
;
861 for_each_pipe(dev_priv
, pipe
) {
862 dev_priv
->fbc
.possible_framebuffer_bits
|=
863 INTEL_FRONTBUFFER_PRIMARY(pipe
);
865 if (IS_HASWELL(dev_priv
) || INTEL_INFO(dev_priv
)->gen
>= 8)
869 if (INTEL_INFO(dev_priv
)->gen
>= 7) {
870 dev_priv
->display
.fbc_enabled
= ilk_fbc_enabled
;
871 dev_priv
->display
.enable_fbc
= gen7_fbc_enable
;
872 dev_priv
->display
.disable_fbc
= ilk_fbc_disable
;
873 } else if (INTEL_INFO(dev_priv
)->gen
>= 5) {
874 dev_priv
->display
.fbc_enabled
= ilk_fbc_enabled
;
875 dev_priv
->display
.enable_fbc
= ilk_fbc_enable
;
876 dev_priv
->display
.disable_fbc
= ilk_fbc_disable
;
877 } else if (IS_GM45(dev_priv
)) {
878 dev_priv
->display
.fbc_enabled
= g4x_fbc_enabled
;
879 dev_priv
->display
.enable_fbc
= g4x_fbc_enable
;
880 dev_priv
->display
.disable_fbc
= g4x_fbc_disable
;
882 dev_priv
->display
.fbc_enabled
= i8xx_fbc_enabled
;
883 dev_priv
->display
.enable_fbc
= i8xx_fbc_enable
;
884 dev_priv
->display
.disable_fbc
= i8xx_fbc_disable
;
886 /* This value was pulled out of someone's hat */
887 I915_WRITE(FBC_CONTROL
, 500 << FBC_CTL_INTERVAL_SHIFT
);
890 dev_priv
->fbc
.enabled
= dev_priv
->display
.fbc_enabled(dev_priv
->dev
);