drivers/gpu/drm/i915/intel_fbc.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. They come from keeping the memory footprint
 * small and having fewer memory pages opened and accessed for refreshing
 * the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and for
 * configuring its offset in the proper registers. The hardware takes care
 * of all the compression and decompression. However, there are many known
 * cases where we have to forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{
        u32 fbc_ctl;

        dev_priv->fbc.enabled = false;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }

        DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_fbc_enable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int cfb_pitch;
        int i;
        u32 fbc_ctl;

        dev_priv->fbc.enabled = true;

        /* Note: fbc.threshold == 1 for i8xx */
        cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];

        /* FBC_CTL wants 32B or 64B units */
        if (IS_GEN2(dev_priv))
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);

        if (IS_GEN4(dev_priv)) {
                u32 fbc_ctl2;

                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
                fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
                I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
        }

        /* enable it... */
        fbc_ctl = I915_READ(FBC_CONTROL);
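        /*
         * Keep only the interval field programmed at init time (see
         * intel_fbc_init(), which writes 500 << FBC_CTL_INTERVAL_SHIFT);
         * every other control bit is rebuilt from scratch below.
         */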
        fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
                      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
{
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_enable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 dpfc_ctl;

        dev_priv->fbc.enabled = true;

        dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

        I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
{
        u32 dpfc_ctl;

        dev_priv->fbc.enabled = false;

        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
{
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

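/*
 * Trigger a "nuke": tell the FBC hardware to throw away its current
 * compressed data so the scanout is recompressed from scratch. Both the
 * ILK and GEN7 enable paths below do this after (re)programming FBC.
 */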
static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
        I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
        POSTING_READ(MSG_FBC_REND_STATE);
}

static void ilk_fbc_enable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dev_priv->fbc.enabled = true;

        dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                threshold++;

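        /*
         * fbc.threshold comes from find_compression_threshold(): 1 means the
         * CFB is as large as the uncompressed framebuffer, 2 means half the
         * size, 4 a quarter. Map that onto the hardware compression limit,
         * after the extra bump for 16bpp formats above.
         */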
        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev_priv))
                dpfc_ctl |= obj->fence_reg;

        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev_priv)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
        }

        intel_fbc_nuke(dev_priv);

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
{
        u32 dpfc_ctl;

        dev_priv->fbc.enabled = false;

        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
{
        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_enable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dev_priv->fbc.enabled = true;

        dpfc_ctl = 0;
        if (IS_IVYBRIDGE(dev_priv))
                dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

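        /*
         * False color is a debug aid: it makes the hardware mark compressed
         * and uncompressed lines in different colors so the effectiveness of
         * FBC can be judged on screen. It is controlled by
         * dev_priv->fbc.false_color, a debug knob set elsewhere in the driver.
         */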
        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;

        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_IVYBRIDGE(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
        } else {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
                I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
                           I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
                           HSW_FBCQ_DIS);
        }

        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);

        intel_fbc_nuke(dev_priv);

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

/**
 * intel_fbc_enabled - Is FBC enabled?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
{
        return dev_priv->fbc.enabled;
}

static void intel_fbc_enable(struct intel_crtc *crtc,
                             const struct drm_framebuffer *fb)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

        dev_priv->fbc.enable_fbc(crtc);

        dev_priv->fbc.crtc = crtc;
        dev_priv->fbc.fb_id = fb->base.id;
        dev_priv->fbc.y = crtc->base.y;
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
        struct intel_fbc_work *work =
                container_of(to_delayed_work(__work),
                             struct intel_fbc_work, work);
        struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
        struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;

        mutex_lock(&dev_priv->fbc.lock);
        if (work == dev_priv->fbc.fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
                if (crtc_fb == work->fb)
                        intel_fbc_enable(work->crtc, work->fb);

                dev_priv->fbc.fbc_work = NULL;
        }
        mutex_unlock(&dev_priv->fbc.lock);

        kfree(work);
}

static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
        WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

        if (dev_priv->fbc.fbc_work == NULL)
                return;

        DRM_DEBUG_KMS("cancelling pending FBC enable\n");

        /* Synchronisation is provided by struct_mutex and checking of
         * dev_priv->fbc.fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
        if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
                /* tasklet was killed before being run, clean up */
                kfree(dev_priv->fbc.fbc_work);

        /* Mark the work as no longer wanted so that if it does
         * wake up (because the work was already running and waiting
         * for our mutex), it will discover that it is no longer
         * necessary to run.
         */
        dev_priv->fbc.fbc_work = NULL;
}

static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
{
        struct intel_fbc_work *work;
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

        WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

        intel_fbc_cancel_work(dev_priv);

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL) {
                DRM_ERROR("Failed to allocate FBC work structure\n");
                intel_fbc_enable(crtc, crtc->base.primary->fb);
                return;
        }

        work->crtc = crtc;
        work->fb = crtc->base.primary->fb;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

        dev_priv->fbc.fbc_work = work;

        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * A more complicated solution would involve tracking vblanks
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         *
         * WaFbcWaitForVBlankBeforeEnable:ilk,snb
         */
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
        WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

        intel_fbc_cancel_work(dev_priv);

        dev_priv->fbc.disable_fbc(dev_priv);
        dev_priv->fbc.crtc = NULL;
}

/**
 * intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC.
 */
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->fbc.enable_fbc)
                return;

        mutex_lock(&dev_priv->fbc.lock);
        __intel_fbc_disable(dev_priv);
        mutex_unlock(&dev_priv->fbc.lock);
}

/*
 * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

        if (!dev_priv->fbc.enable_fbc)
                return;

        mutex_lock(&dev_priv->fbc.lock);
        if (dev_priv->fbc.crtc == crtc)
                __intel_fbc_disable(dev_priv);
        mutex_unlock(&dev_priv->fbc.lock);
}

const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
{
        switch (reason) {
        case FBC_OK:
                return "FBC enabled but currently disabled in hardware";
        case FBC_UNSUPPORTED:
                return "unsupported by this chipset";
        case FBC_NO_OUTPUT:
                return "no output";
        case FBC_STOLEN_TOO_SMALL:
                return "not enough stolen memory";
        case FBC_UNSUPPORTED_MODE:
                return "mode incompatible with compression";
        case FBC_MODE_TOO_LARGE:
                return "mode too large for compression";
        case FBC_BAD_PLANE:
                return "FBC unsupported on plane";
        case FBC_NOT_TILED:
                return "framebuffer not tiled or fenced";
        case FBC_MULTIPLE_PIPES:
                return "more than one pipe active";
        case FBC_MODULE_PARAM:
                return "disabled per module param";
        case FBC_CHIP_DEFAULT:
                return "disabled per chip default";
        case FBC_ROTATION:
                return "rotation unsupported";
        case FBC_IN_DBG_MASTER:
                return "Kernel debugger is active";
        default:
                MISSING_CASE(reason);
                return "unknown reason";
        }
}

static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
                              enum no_fbc_reason reason)
{
        if (dev_priv->fbc.no_fbc_reason == reason)
                return;

        dev_priv->fbc.no_fbc_reason = reason;
        DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
}

static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        enum pipe pipe;
        bool pipe_a_only = false;

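        /*
         * On HSW and gen8+ FBC is only used with pipe A's primary plane, so
         * only that pipe needs to be considered; this mirrors the
         * possible_framebuffer_bits setup in intel_fbc_init().
         */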
        if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
                pipe_a_only = true;

        for_each_pipe(dev_priv, pipe) {
                tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];

                if (intel_crtc_active(tmp_crtc) &&
                    to_intel_plane_state(tmp_crtc->primary->state)->visible)
                        crtc = tmp_crtc;

                if (pipe_a_only)
                        break;
        }

        if (!crtc || crtc->primary->fb == NULL)
                return NULL;

        return crtc;
}

static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;
        int n_pipes = 0;
        struct drm_crtc *crtc;

        if (INTEL_INFO(dev_priv)->gen > 4)
                return true;

        for_each_pipe(dev_priv, pipe) {
                crtc = dev_priv->pipe_to_crtc_mapping[pipe];

                if (intel_crtc_active(crtc) &&
                    to_intel_plane_state(crtc->primary->state)->visible)
                        n_pipes++;
        }

        return (n_pipes < 2);
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
                                      struct drm_mm_node *node,
                                      int size,
                                      int fb_cpp)
{
        int compression_threshold = 1;
        int ret;

        /* HACK: This code depends on what we will do in *_enable_fbc. If that
         * code changes, this code needs to change as well.
         *
         * The enable_fbc code will attempt to use one of our 2 compression
         * thresholds, therefore, in that case, we only have 1 resort.
         */
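        /*
         * Allocation attempts, in order: 2 * size and size at threshold 1,
         * then size / 2 at threshold 2 and size / 4 at threshold 4, giving
         * up once the checks below say the hardware limit is exceeded.
         */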

        /* Try to over-allocate to reduce reallocations and fragmentation. */
        ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
        if (ret == 0)
                return compression_threshold;

again:
        /* HW's ability to limit the CFB is 1:4 */
        if (compression_threshold > 4 ||
            (fb_cpp == 2 && compression_threshold == 2))
                return 0;

        ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
        if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
                return 0;
        } else if (ret) {
                compression_threshold <<= 1;
                goto again;
        } else {
                return compression_threshold;
        }
}

static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
                               int fb_cpp)
{
        struct drm_mm_node *uninitialized_var(compressed_llb);
        int ret;

        ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
                                         size, fb_cpp);
        if (!ret)
                goto err_llb;
        else if (ret > 1) {
                DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

        }

        dev_priv->fbc.threshold = ret;

        if (INTEL_INFO(dev_priv)->gen >= 5)
                I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
        else if (IS_GM45(dev_priv)) {
                I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;

                ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
                                                  4096, 4096);
                if (ret)
                        goto err_fb;

                dev_priv->fbc.compressed_llb = compressed_llb;

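                /*
                 * Pre-G4X parts also need a line length buffer, and these
                 * base registers appear to take absolute addresses (hence the
                 * addition of the stolen memory base); the newer paths above
                 * program offsets into stolen memory directly.
                 */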
                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        dev_priv->fbc.uncompressed_size = size;

        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);

        return 0;

err_fb:
        kfree(compressed_llb);
        i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
        if (dev_priv->fbc.uncompressed_size == 0)
                return;

        i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);

        if (dev_priv->fbc.compressed_llb) {
                i915_gem_stolen_remove_node(dev_priv,
                                            dev_priv->fbc.compressed_llb);
                kfree(dev_priv->fbc.compressed_llb);
        }

        dev_priv->fbc.uncompressed_size = 0;
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->fbc.enable_fbc)
                return;

        mutex_lock(&dev_priv->fbc.lock);
        __intel_fbc_cleanup_cfb(dev_priv);
        mutex_unlock(&dev_priv->fbc.lock);
}

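/*
 * Make sure the compressed framebuffer can hold at least size bytes. The CFB
 * is only reallocated when it has to grow; a smaller request keeps the
 * existing (larger) block.
 */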
static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
                               int fb_cpp)
{
        if (size <= dev_priv->fbc.uncompressed_size)
                return 0;

        /* Release any current block */
        __intel_fbc_cleanup_cfb(dev_priv);

        return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
}

/**
 * __intel_fbc_update - enable/disable FBC as needed, unlocked
 * @dev_priv: i915 device instance
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 * - plane A only (on pre-965)
 * - no pixel multiply/line duplication
 * - no alpha buffer discard
 * - no dual wide
 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
        struct drm_crtc *crtc = NULL;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct drm_i915_gem_object *obj;
        const struct drm_display_mode *adjusted_mode;
        unsigned int max_width, max_height;

        WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

        /* disable framebuffer compression in vGPU */
        if (intel_vgpu_active(dev_priv->dev))
                i915.enable_fbc = 0;

        if (i915.enable_fbc < 0) {
                set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
                goto out_disable;
        }

        if (!i915.enable_fbc) {
                set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
                goto out_disable;
        }

        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
         *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        crtc = intel_fbc_find_crtc(dev_priv);
        if (!crtc) {
                set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
                goto out_disable;
        }

        if (!multiple_pipes_ok(dev_priv)) {
                set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
                goto out_disable;
        }

        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->primary->fb;
        obj = intel_fb_obj(fb);
        adjusted_mode = &intel_crtc->config->base.adjusted_mode;

        if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
            (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
                set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
                goto out_disable;
        }

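        /*
         * Per-platform limits on the plane source size that the FBC hardware
         * can handle.
         */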
        if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
                max_width = 4096;
                max_height = 4096;
        } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                max_width = 4096;
                max_height = 2048;
        } else {
                max_width = 2048;
                max_height = 1536;
        }
        if (intel_crtc->config->pipe_src_w > max_width ||
            intel_crtc->config->pipe_src_h > max_height) {
                set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
                goto out_disable;
        }
        if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
            intel_crtc->plane != PLANE_A) {
                set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
                goto out_disable;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
                goto out_disable;
        }
        if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
            crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
                set_no_fbc_reason(dev_priv, FBC_ROTATION);
                goto out_disable;
        }

        /* If the kernel debugger is active, always disable compression */
        if (in_dbg_master()) {
                set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
                goto out_disable;
        }

        if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
                                drm_format_plane_cpp(fb->pixel_format, 0))) {
                set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
                goto out_disable;
        }

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (dev_priv->fbc.crtc == intel_crtc &&
            dev_priv->fbc.fb_id == fb->base.id &&
            dev_priv->fbc.y == crtc->y)
                return;

        if (intel_fbc_enabled(dev_priv)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
                 * we disable the FBC at the start of the page-flip
                 * sequence, but also more than one vblank has passed.
                 *
                 * For the former case of modeswitching, it is possible
                 * to switch between two FBC valid configurations
                 * instantaneously so we do need to disable the FBC
                 * before we can modify its control registers. We also
                 * have to wait for the next vblank for that to take
                 * effect. However, since we delay enabling FBC we can
                 * assume that a vblank has passed since disabling and
                 * that we can safely alter the registers in the deferred
                 * callback.
                 *
                 * In the scenario that we go from a valid to invalid
                 * and then back to valid FBC configuration we have
                 * no strict enforcement that a vblank occurred since
                 * disabling the FBC. However, along all current pipe
                 * disabling paths we do need to wait for a vblank at
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
                __intel_fbc_disable(dev_priv);
        }

        intel_fbc_schedule_enable(intel_crtc);
        dev_priv->fbc.no_fbc_reason = FBC_OK;
        return;

out_disable:
        /* Multiple disables should be harmless */
        if (intel_fbc_enabled(dev_priv)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                __intel_fbc_disable(dev_priv);
        }
        __intel_fbc_cleanup_cfb(dev_priv);
}

/*
 * intel_fbc_update - enable/disable FBC as needed
 * @dev_priv: i915 device instance
 *
 * This function reevaluates the overall state and enables or disables FBC.
 */
void intel_fbc_update(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->fbc.enable_fbc)
                return;

        mutex_lock(&dev_priv->fbc.lock);
        __intel_fbc_update(dev_priv);
        mutex_unlock(&dev_priv->fbc.lock);
}

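/*
 * intel_fbc_invalidate() and intel_fbc_flush() track frontbuffer activity:
 * invalidate records which FBC-relevant frontbuffer bits have pending
 * (non-GTT) writes and disables FBC while any are busy; flush clears them
 * and re-runs __intel_fbc_update() once nothing relevant is busy anymore.
 */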
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits,
                          enum fb_op_origin origin)
{
        unsigned int fbc_bits;

        if (!dev_priv->fbc.enable_fbc)
                return;

        if (origin == ORIGIN_GTT)
                return;

        mutex_lock(&dev_priv->fbc.lock);

        if (dev_priv->fbc.enabled)
                fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
        else if (dev_priv->fbc.fbc_work)
                fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
                                dev_priv->fbc.fbc_work->crtc->pipe);
        else
                fbc_bits = dev_priv->fbc.possible_framebuffer_bits;

        dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);

        if (dev_priv->fbc.busy_bits)
                __intel_fbc_disable(dev_priv);

        mutex_unlock(&dev_priv->fbc.lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
                     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
        if (!dev_priv->fbc.enable_fbc)
                return;

        if (origin == ORIGIN_GTT)
                return;

        mutex_lock(&dev_priv->fbc.lock);

        dev_priv->fbc.busy_bits &= ~frontbuffer_bits;

        if (!dev_priv->fbc.busy_bits) {
                __intel_fbc_disable(dev_priv);
                __intel_fbc_update(dev_priv);
        }

        mutex_unlock(&dev_priv->fbc.lock);
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;

        mutex_init(&dev_priv->fbc.lock);

        if (!HAS_FBC(dev_priv)) {
                dev_priv->fbc.enabled = false;
                dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                dev_priv->fbc.possible_framebuffer_bits |=
                                INTEL_FRONTBUFFER_PRIMARY(pipe);

                if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
                        break;
        }

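        /*
         * Pick the per-platform hooks. Note that the gen7+ case reuses the
         * ILK status/disable helpers because those generations share the
         * ILK_DPFC_CONTROL register; only the enable function differs.
         */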
        if (INTEL_INFO(dev_priv)->gen >= 7) {
                dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
                dev_priv->fbc.enable_fbc = gen7_fbc_enable;
                dev_priv->fbc.disable_fbc = ilk_fbc_disable;
        } else if (INTEL_INFO(dev_priv)->gen >= 5) {
                dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
                dev_priv->fbc.enable_fbc = ilk_fbc_enable;
                dev_priv->fbc.disable_fbc = ilk_fbc_disable;
        } else if (IS_GM45(dev_priv)) {
                dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
                dev_priv->fbc.enable_fbc = g4x_fbc_enable;
                dev_priv->fbc.disable_fbc = g4x_fbc_disable;
        } else {
                dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
                dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
                dev_priv->fbc.disable_fbc = i8xx_fbc_disable;

                /* This value was pulled out of someone's hat */
                I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
        }

        dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
}