69726a7ed0d0f1ad8268481e24a504376a84d431
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_fbc.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * DOC: Frame Buffer Compression (FBC)
26 *
27 * FBC tries to save memory bandwidth (and so power consumption) by
28 * compressing the amount of memory used by the display. It is total
29 * transparent to user space and completely handled in the kernel.
30 *
31 * The benefits of FBC are mostly visible with solid backgrounds and
32 * variation-less patterns. It comes from keeping the memory footprint small
33 * and having fewer memory pages opened and accessed for refreshing the display.
34 *
35 * i915 is responsible to reserve stolen memory for FBC and configure its
36 * offset on proper registers. The hardware takes care of all
37 * compress/decompress. However there are many known cases where we have to
38 * forcibly disable it to allow proper screen updates.
39 */
40
41 #include "intel_drv.h"
42 #include "i915_drv.h"
43
/*
 * i8xx_fbc_disable - disable FBC on gen2/3/4 style hardware
 * @dev_priv: i915 device instance
 *
 * Marks FBC disabled in software state, clears FBC_CTL_EN in the control
 * register if it was set, then polls (up to 10ms) for the hardware's
 * "compressing" status bit to clear before declaring success.
 */
static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Software state first: report FBC off even if the wait below fails. */
	dev_priv->fbc.enabled = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
66
/*
 * i8xx_fbc_enable - enable FBC on gen2/3/4 style hardware
 * @crtc: the CRTC whose primary plane's framebuffer will be compressed
 *
 * Programs the CFB pitch, clears the tag registers, sets up FBC_CONTROL2 /
 * the fence offset on gen4, and finally enables compression in FBC_CONTROL
 * while preserving the interval field already programmed there.
 */
static void i8xx_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = true;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
	/* Never program a CFB pitch larger than the fb's actual pitch. */
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Keep only the interval field; everything else is reprogrammed. */
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}
116
117 static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
118 {
119 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
120 }
121
/*
 * g4x_fbc_enable - enable FBC on G4x-class hardware
 * @crtc: the CRTC whose primary plane's framebuffer will be compressed
 *
 * Programs the DPFC control register with plane, compression limit and
 * fence selection, sets the fence Y offset, then sets DPFC_CTL_EN.
 */
static void g4x_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
	/* 16bpp formats get the tighter 2x compression limit. */
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
145
146 static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
147 {
148 u32 dpfc_ctl;
149
150 dev_priv->fbc.enabled = false;
151
152 /* Disable compression */
153 dpfc_ctl = I915_READ(DPFC_CONTROL);
154 if (dpfc_ctl & DPFC_CTL_EN) {
155 dpfc_ctl &= ~DPFC_CTL_EN;
156 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
157
158 DRM_DEBUG_KMS("disabled FBC\n");
159 }
160 }
161
162 static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
163 {
164 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
165 }
166
/*
 * intel_fbc_nuke - invalidate the compressed framebuffer contents
 * @dev_priv: i915 device instance
 *
 * Writes FBC_REND_NUKE to MSG_FBC_REND_STATE and uses a posting read to
 * make sure the write has reached the hardware before continuing.
 */
static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}
172
/*
 * ilk_fbc_enable - enable FBC on Ironlake/Sandybridge-class hardware
 * @crtc: the CRTC whose primary plane's framebuffer will be compressed
 *
 * Selects a compression limit based on the stolen-memory threshold computed
 * at CFB allocation time (bumped one step for 16bpp), programs the fence and
 * render-target base registers, enables compression, and nukes the CFB so
 * the hardware recompresses from scratch.
 */
static void ilk_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
	/* 16bpp needs one step more compression headroom. */
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	/* Only gen5 carries the fence number in the DPFC control register. */
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	/* Gen6 programs the CPU fence through the SNB_DPFC_CTL_SA register. */
	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
	}

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
218
219 static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
220 {
221 u32 dpfc_ctl;
222
223 dev_priv->fbc.enabled = false;
224
225 /* Disable compression */
226 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
227 if (dpfc_ctl & DPFC_CTL_EN) {
228 dpfc_ctl &= ~DPFC_CTL_EN;
229 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
230
231 DRM_DEBUG_KMS("disabled FBC\n");
232 }
233 }
234
235 static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
236 {
237 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
238 }
239
/*
 * gen7_fbc_enable - enable FBC on gen7+ (IVB/HSW/BDW) hardware
 * @crtc: the CRTC whose primary plane's framebuffer will be compressed
 *
 * Programs the compression limit (from the stolen-memory threshold, bumped
 * for 16bpp), applies the per-platform async-flip chicken-bit workarounds,
 * sets up the CPU fence, enables compression and nukes the CFB.
 */
static void gen7_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = 0;
	/* Only Ivybridge carries the plane selection in the control register. */
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

	/* 16bpp needs one step more compression headroom. */
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	/* Debug aid: make compressed regions visually distinct. */
	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
297
/**
 * intel_fbc_enabled - Is FBC enabled?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 *
 * Returns the software-tracked enabled flag, not a register readback.
 */
bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.enabled;
}
310
/*
 * intel_fbc_enable - actually enable FBC for the given CRTC/fb
 * @crtc: the CRTC to enable FBC on
 * @fb: the framebuffer being scanned out
 *
 * Calls the per-platform enable hook and records which crtc/fb/y-offset
 * the hardware was programmed with, so __intel_fbc_update() can detect
 * whether the scanout configuration has changed.
 */
static void intel_fbc_enable(struct intel_crtc *crtc,
			     const struct drm_framebuffer *fb)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	dev_priv->fbc.enable_fbc(crtc);

	dev_priv->fbc.crtc = crtc;
	dev_priv->fbc.fb_id = fb->base.id;
	dev_priv->fbc.y = crtc->base.y;
}
322
/*
 * intel_fbc_work_fn - deferred-work handler that performs the delayed enable
 * @__work: embedded delayed_work, container of struct intel_fbc_work
 *
 * Runs after the delay scheduled by intel_fbc_schedule_enable().  Only
 * enables FBC if this work item is still the current one and the CRTC is
 * still scanning out the same framebuffer it was scheduled with.  Frees
 * the work item in all cases.
 */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;

	mutex_lock(&dev_priv->fbc.lock);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (crtc_fb == work->fb)
			intel_fbc_enable(work->crtc, work->fb);

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev_priv->fbc.lock);

	kfree(work);
}
345
/*
 * intel_fbc_cancel_work - cancel any pending deferred FBC enable
 * @dev_priv: i915 device instance
 *
 * Must be called with fbc.lock held.  If the delayed work had not started
 * yet it is freed here; otherwise clearing fbc.fbc_work tells the running
 * work function to bail out (and free itself) when it acquires the lock.
 */
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
370
/*
 * intel_fbc_schedule_enable - schedule a deferred FBC enable for @crtc
 * @crtc: the CRTC to enable FBC on
 *
 * Must be called with fbc.lock held.  Cancels any previously scheduled
 * enable, then queues a 50ms delayed work to do the actual enabling.  If
 * the work structure cannot be allocated, falls back to enabling FBC
 * immediately (losing the vblank-delay workaround).
 */
static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	intel_fbc_cancel_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		/* Best effort: enable synchronously without the delay. */
		intel_fbc_enable(crtc, crtc->base.primary->fb);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->base.primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
408
/*
 * __intel_fbc_disable - disable FBC, with fbc.lock already held
 * @dev_priv: i915 device instance
 *
 * Cancels any pending deferred enable, calls the per-platform disable hook
 * and forgets which CRTC FBC was associated with.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	intel_fbc_cancel_work(dev_priv);

	dev_priv->fbc.disable_fbc(dev_priv);
	dev_priv->fbc.crtc = NULL;
}
418
/**
 * intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC.  It is a no-op on platforms without an FBC
 * enable hook (i.e. where FBC is unsupported).
 */
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_disable(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}
434
/*
 * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 * CRTCs that FBC is not currently bound to are left untouched.
 */
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	if (dev_priv->fbc.crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}
453
/*
 * intel_no_fbc_reason_str - human-readable string for an FBC-off reason
 * @reason: the no_fbc_reason enum value
 *
 * Used for debug logging; unknown values trigger MISSING_CASE and fall back
 * to a generic string.
 */
const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
{
	switch (reason) {
	case FBC_OK:
		return "FBC enabled but currently disabled in hardware";
	case FBC_UNSUPPORTED:
		return "unsupported by this chipset";
	case FBC_NO_OUTPUT:
		return "no output";
	case FBC_STOLEN_TOO_SMALL:
		return "not enough stolen memory";
	case FBC_UNSUPPORTED_MODE:
		return "mode incompatible with compression";
	case FBC_MODE_TOO_LARGE:
		return "mode too large for compression";
	case FBC_BAD_PLANE:
		return "FBC unsupported on plane";
	case FBC_NOT_TILED:
		return "framebuffer not tiled or fenced";
	case FBC_MULTIPLE_PIPES:
		return "more than one pipe active";
	case FBC_MODULE_PARAM:
		return "disabled per module param";
	case FBC_CHIP_DEFAULT:
		return "disabled per chip default";
	case FBC_ROTATION:
		return "rotation unsupported";
	case FBC_IN_DBG_MASTER:
		return "Kernel debugger is active";
	case FBC_BAD_STRIDE:
		return "framebuffer stride not supported";
	default:
		MISSING_CASE(reason);
		return "unknown reason";
	}
}
490
491 static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
492 enum no_fbc_reason reason)
493 {
494 if (dev_priv->fbc.no_fbc_reason == reason)
495 return;
496
497 dev_priv->fbc.no_fbc_reason = reason;
498 DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
499 }
500
/*
 * intel_fbc_find_crtc - pick the CRTC that FBC should be enabled on
 * @dev_priv: i915 device instance
 *
 * Walks the pipes and returns the last active CRTC with a visible primary
 * plane and an attached framebuffer, or NULL if there is none.  On HSW and
 * gen8+ only pipe A is considered.
 */
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	enum pipe pipe;
	bool pipe_a_only = false;

	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
		pipe_a_only = true;

	for_each_pipe(dev_priv, pipe) {
		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
			crtc = tmp_crtc;

		if (pipe_a_only)
			break;
	}

	/* A CRTC without a framebuffer can't be compressed. */
	if (!crtc || crtc->primary->fb == NULL)
		return NULL;

	return crtc;
}
526
527 static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
528 {
529 enum pipe pipe;
530 int n_pipes = 0;
531 struct drm_crtc *crtc;
532
533 if (INTEL_INFO(dev_priv)->gen > 4)
534 return true;
535
536 for_each_pipe(dev_priv, pipe) {
537 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
538
539 if (intel_crtc_active(crtc) &&
540 to_intel_plane_state(crtc->primary->state)->visible)
541 n_pipes++;
542 }
543
544 return (n_pipes < 2);
545 }
546
/*
 * find_compression_threshold - reserve stolen memory for the CFB
 * @dev_priv: i915 device instance
 * @node: stolen-memory node to fill in on success
 * @size: uncompressed framebuffer size in bytes
 * @fb_cpp: bytes per pixel of the framebuffer
 *
 * Tries to allocate a compressed framebuffer from stolen memory, halving
 * the request (and doubling the compression threshold) on each failure.
 * Returns the threshold (1, 2 or 4) on success, or 0 if no allocation
 * succeeded within the hardware's limits.
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
	else
		end = dev_priv->gtt.stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	/* Note: "size <<= 1" intentionally doubles size before the request. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	/* Each retry halves size (>>= 1 mutates it) and doubles threshold. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
595
/*
 * intel_fbc_alloc_cfb - allocate the compressed framebuffer (and LL buffer)
 * @dev_priv: i915 device instance
 * @size: uncompressed framebuffer size in bytes
 * @fb_cpp: bytes per pixel of the framebuffer
 *
 * Reserves stolen memory for the CFB via find_compression_threshold(),
 * programs the CFB base register for the platform, and on pre-GM45 gen4-
 * hardware additionally allocates and programs the line-length buffer.
 * Returns 0 on success or -ENOSPC if stolen memory could not be reserved.
 */
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
			       int fb_cpp)
{
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	dev_priv->fbc.threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	} else {
		/* Older hardware needs a separate line-length buffer too. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.uncompressed_size = size;

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      dev_priv->fbc.compressed_fb.size,
		      dev_priv->fbc.threshold);

	return 0;

err_fb:
	/* compressed_llb is NULL here if its kzalloc failed; kfree(NULL) is ok. */
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
650
651 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
652 {
653 if (dev_priv->fbc.uncompressed_size == 0)
654 return;
655
656 i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
657
658 if (dev_priv->fbc.compressed_llb) {
659 i915_gem_stolen_remove_node(dev_priv,
660 dev_priv->fbc.compressed_llb);
661 kfree(dev_priv->fbc.compressed_llb);
662 }
663
664 dev_priv->fbc.uncompressed_size = 0;
665 }
666
/*
 * intel_fbc_cleanup_cfb - locked wrapper around __intel_fbc_cleanup_cfb
 * @dev_priv: i915 device instance
 *
 * No-op on platforms without an FBC enable hook.
 */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}
676
/*
 * intel_fbc_setup_cfb - make sure the CFB is large enough for @size
 * @dev_priv: i915 device instance
 * @size: required uncompressed framebuffer size in bytes
 * @fb_cpp: bytes per pixel of the framebuffer
 *
 * Keeps the existing allocation if it already covers @size; otherwise
 * releases it and allocates a new one.  Returns 0 on success or the
 * error from intel_fbc_alloc_cfb().
 */
static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
			       int fb_cpp)
{
	if (size <= dev_priv->fbc.uncompressed_size)
		return 0;

	/* Release any current block */
	__intel_fbc_cleanup_cfb(dev_priv);

	return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
}
688
689 static bool stride_is_valid(struct drm_i915_private *dev_priv,
690 unsigned int stride)
691 {
692 /* These should have been caught earlier. */
693 WARN_ON(stride < 512);
694 WARN_ON((stride & (64 - 1)) != 0);
695
696 /* Below are the additional FBC restrictions. */
697
698 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
699 return stride == 4096 || stride == 8192;
700
701 if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
702 return false;
703
704 if (stride > 16384)
705 return false;
706
707 return true;
708 }
709
/**
 * __intel_fbc_update - enable/disable FBC as needed, unlocked
 * @dev_priv: i915 device instance
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 *
 * Caller must hold fbc.lock.  Every bail-out path records a
 * no_fbc_reason and disables FBC/frees the CFB via out_disable.
 */
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc = NULL;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	/* disable framebuffer compression in vGPU */
	if (intel_vgpu_active(dev_priv->dev))
		i915.enable_fbc = 0;

	if (i915.enable_fbc < 0) {
		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
		goto out_disable;
	}

	if (!i915.enable_fbc) {
		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
		goto out_disable;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	crtc = intel_fbc_find_crtc(dev_priv);
	if (!crtc) {
		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
		goto out_disable;
	}

	if (!multiple_pipes_ok(dev_priv)) {
		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config->base.adjusted_mode;

	/* Interlaced and doublescan modes are incompatible with FBC. */
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
		goto out_disable;
	}

	/* Per-generation limits on the compressible surface size. */
	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config->pipe_src_w > max_width ||
	    intel_crtc->config->pipe_src_h > max_height) {
		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
		goto out_disable;
	}
	/* Pre-gen4 and DDI platforms only support FBC on plane A. */
	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
	    intel_crtc->plane != PLANE_A) {
		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
		goto out_disable;
	}
	/* Rotated scanout is not supported on older non-g4x hardware. */
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
		set_no_fbc_reason(dev_priv, FBC_ROTATION);
		goto out_disable;
	}

	if (!stride_is_valid(dev_priv, fb->pitches[0])) {
		set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master()) {
		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
		goto out_disable;
	}

	if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
				drm_format_plane_cpp(fb->pixel_format, 0))) {
		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.crtc == intel_crtc &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev_priv)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		__intel_fbc_disable(dev_priv);
	}

	intel_fbc_schedule_enable(intel_crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev_priv)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		__intel_fbc_disable(dev_priv);
	}
	__intel_fbc_cleanup_cfb(dev_priv);
}
887
/*
 * intel_fbc_update - enable/disable FBC as needed
 * @dev_priv: i915 device instance
 *
 * This function reevaluates the overall state and enables or disables FBC.
 * It is a no-op on platforms without an FBC enable hook.
 */
void intel_fbc_update(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_update(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}
903
/*
 * intel_fbc_invalidate - react to frontbuffer invalidation
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: the frontbuffer bits being invalidated
 * @origin: where the invalidation came from
 *
 * Accumulates the FBC-relevant subset of @frontbuffer_bits into busy_bits
 * and disables FBC while any of them are set.  GTT-origin invalidations
 * are ignored (the CPU fence already tracks those writes).
 */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	unsigned int fbc_bits;

	if (!dev_priv->fbc.enable_fbc)
		return;

	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&dev_priv->fbc.lock);

	/* Work out which frontbuffer bits FBC currently cares about:
	 * the active crtc's primary plane, the scheduled crtc's, or
	 * all candidates if neither is set.
	 */
	if (dev_priv->fbc.enabled)
		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
	else if (dev_priv->fbc.fbc_work)
		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
					dev_priv->fbc.fbc_work->crtc->pipe);
	else
		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;

	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);

	if (dev_priv->fbc.busy_bits)
		__intel_fbc_disable(dev_priv);

	mutex_unlock(&dev_priv->fbc.lock);
}
933
/*
 * intel_fbc_flush - react to a frontbuffer flush
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: the frontbuffer bits being flushed
 * @origin: where the flush came from
 *
 * Clears the flushed bits from busy_bits; once nothing is busy any more,
 * disables FBC and re-runs the update logic so it can be re-enabled.
 * GTT-origin flushes are ignored, mirroring intel_fbc_invalidate().
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&dev_priv->fbc.lock);

	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;

	if (!dev_priv->fbc.busy_bits) {
		__intel_fbc_disable(dev_priv);
		__intel_fbc_update(dev_priv);
	}

	mutex_unlock(&dev_priv->fbc.lock);
}
954
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 *
 * Initializes the fbc lock, records which frontbuffer bits can affect FBC,
 * wires up the per-generation enable/disable/enabled vfuncs, and seeds the
 * software enabled flag from a hardware readback.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	mutex_init(&dev_priv->fbc.lock);

	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->fbc.possible_framebuffer_bits |=
				INTEL_FRONTBUFFER_PRIMARY(pipe);

		/* HSW and gen8+ only use pipe A for FBC (see
		 * intel_fbc_find_crtc), so stop after the first pipe. */
		if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
			break;
	}

	/* Pick the vfunc set matching the hardware generation. */
	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
	} else {
		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
}
This page took 0.073496 seconds and 4 git commands to generate.