drm/i915: Move FBC stuff to intel_fbc.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "intel_drv.h"
#include "i915_drv.h"

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */
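
/*
 * For example, FBC can be controlled from the kernel command line via the
 * parameter above (the same one consulted by intel_fbc_update() below):
 *
 *	i915.enable_fbc=1	force enable
 *	i915.enable_fbc=0	force disable
 *	i915.enable_fbc=-1	use the per-chip default
 */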

static void i8xx_fbc_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

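/*
 * Program the pre-gen5 (8xx/9xx) FBC hardware: the compressed framebuffer
 * pitch is written in 32B units on gen2 and 64B units otherwise, stale
 * compression tags are cleared first, and on gen4 the plane and CPU fence
 * are selected via FBC_CONTROL2 before FBC_CONTROL is finally armed.
 */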
static void i8xx_fbc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = true;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_fbc_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void snb_fbc_blit_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. This parameter has no
	 * impact on other platforms for now. */
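	/*
	 * GEN6_BLITTER_ECOSKPD appears to be a locked register: the upper
	 * half carries per-bit write-enable bits (GEN6_BLITTER_LOCK_SHIFT).
	 * The sequence below therefore unlocks the FBC_NOTIFY bit, sets it,
	 * and locks it again, with a posting read to flush the writes, all
	 * while holding the media forcewake.
	 */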
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}

static void ilk_fbc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

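	/*
	 * fbc.threshold is presumably set up together with the compressed
	 * buffer allocation in stolen memory (see the
	 * i915_gem_stolen_setup_compression() call in intel_fbc_update())
	 * and reflects how much smaller than the framebuffer that buffer
	 * is; pick the matching DPFC compression limit, bumped one extra
	 * step for 16bpp formats above.
	 */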
	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		snb_fbc_blit_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ilk_fbc_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ilk_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	snb_fbc_blit_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->fbc.enabled;
}

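/*
 * Poke the FBC hardware with a software render-tracking message: the
 * caller-supplied value is written to MSG_FBC_REND_STATE, presumably so
 * that FBC reacts to frontbuffer writes it would otherwise not notice.
 * Only relevant on gen8, and only while FBC is actually enabled.
 */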
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN8(dev))
		return;

	if (!intel_fbc_enabled(dev))
		return;

	I915_WRITE(MSG_FBC_REND_STATE, value);
}

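/*
 * Deferred FBC enable: intel_fbc_enable() below queues an intel_fbc_work
 * item and this function runs roughly 50ms later. Under struct_mutex it
 * double-checks that the work item is still the current one and that the
 * CRTC is still scanning out the same framebuffer before touching the
 * hardware.
 */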
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* work was cancelled before it ran, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}

static void intel_fbc_enable(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_fbc_cancel_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_fbc_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_cancel_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

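/*
 * Record why FBC could not be enabled. Returns true only when the reason
 * actually changed, which the callers in intel_fbc_update() use to avoid
 * printing the same debug message on every update.
 */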
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}

/**
 * intel_fbc_update - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 * - plane A only (on pre-965)
 * - no pixel multiply/line duplication
 * - no alpha buffer discard
 * - no dual wide
 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 * - more than one pipe is active
	 * - changing FBC params (stride, fence, mode)
	 * - new fb is too large to fit in compressed buffer
	 * - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two valid FBC configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_fbc_disable(dev);
	}

	intel_fbc_enable(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_fbc_disable(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}

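/*
 * Hook up the generation-specific vfuncs. gen5/gen6 share the ILK enable
 * path (ilk_fbc_enable() adds the extra SNB CPU fence programming itself),
 * while gen7+ gets gen7_fbc_enable() but keeps the ILK disable/status
 * helpers, which operate on the same ILK_DPFC_CONTROL register.
 */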
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
		return;
	}

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
		dev_priv->display.enable_fbc = gen7_fbc_enable;
		dev_priv->display.disable_fbc = ilk_fbc_disable;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
		dev_priv->display.enable_fbc = ilk_fbc_enable;
		dev_priv->display.disable_fbc = ilk_fbc_disable;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
		dev_priv->display.enable_fbc = g4x_fbc_enable;
		dev_priv->display.disable_fbc = g4x_fbc_disable;
	} else {
		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
		dev_priv->display.enable_fbc = i8xx_fbc_enable;
		dev_priv->display.disable_fbc = i8xx_fbc_disable;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}