drivers/gpu/drm/i915/intel_fbc.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * DOC: Frame Buffer Compression (FBC)
26 *
27 * FBC tries to save memory bandwidth (and so power consumption) by
28 * compressing the amount of memory used by the display. It is totally
29 * transparent to user space and completely handled in the kernel.
30 *
31 * The benefits of FBC are mostly visible with solid backgrounds and
32 * variation-less patterns. They come from keeping the memory footprint small
33 * and having fewer memory pages opened and accessed for refreshing the display.
34 *
35 * i915 is responsible for reserving stolen memory for FBC and configuring its
36 * offset in the proper registers. The hardware takes care of all the
37 * compression/decompression. However, there are many known cases where we
38 * have to forcibly disable it to allow proper screen updates.
39 */
40
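/*
 * A note on naming used throughout this file: "enable"/"disable" refer to the
 * software state (reserving the CFB and pairing FBC with a CRTC), while
 * "activate"/"deactivate" refer to actually programming the hardware
 * registers. FBC can be enabled without being active yet.
 *
 * A rough, illustrative sketch of the expected call flow:
 *
 *	intel_fbc_init(dev_priv);
 *	...
 *	intel_fbc_enable(crtc);		- reserve the CFB, pair FBC with the CRTC
 *	intel_fbc_update(crtc);		- schedule the deferred HW activation
 *	intel_fbc_invalidate(...);	- frontbuffer rendering: deactivate
 *	intel_fbc_flush(...);		- rendering done: reevaluate/reactivate
 *	intel_fbc_disable_crtc(crtc);	- free the CFB, unpair
 */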
41 #include "intel_drv.h"
42 #include "i915_drv.h"
43
44 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
45 {
46 return dev_priv->fbc.activate != NULL;
47 }
48
49 static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
50 {
51 return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
52 }
53
54 static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
55 {
56 return INTEL_INFO(dev_priv)->gen < 4;
57 }
58
59 /*
60 * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
61 * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
62 * origin so the x and y offsets can actually fit in the registers. As a
63 * consequence, the fence doesn't really start exactly at the display plane
64 * address we program, because it starts at the real start of the buffer, so
65 * we have to take this into consideration here.
66 */
67 static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
68 {
69 return crtc->base.y - crtc->adjusted_y;
70 }
71
72 /*
73 * For SKL+, the plane source size used by the hardware is based on the value we
74 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
75 * we wrote to PIPESRC.
76 */
77 static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
78 int *width, int *height)
79 {
80 struct intel_plane_state *plane_state =
81 to_intel_plane_state(crtc->base.primary->state);
82 int w, h;
83
84 if (intel_rotation_90_or_270(plane_state->base.rotation)) {
85 w = drm_rect_height(&plane_state->src) >> 16;
86 h = drm_rect_width(&plane_state->src) >> 16;
87 } else {
88 w = drm_rect_width(&plane_state->src) >> 16;
89 h = drm_rect_height(&plane_state->src) >> 16;
90 }
91
92 if (width)
93 *width = w;
94 if (height)
95 *height = h;
96 }
97
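/*
 * Worst-case (uncompressed) CFB size: the number of lines the hardware will
 * track (capped at 2048 on gen7+) times the full framebuffer stride.
 */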
98 static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
99 struct drm_framebuffer *fb)
100 {
101 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
102 int lines;
103
104 intel_fbc_get_plane_source_size(crtc, NULL, &lines);
105 if (INTEL_INFO(dev_priv)->gen >= 7)
106 lines = min(lines, 2048);
107
108 /* Hardware needs the full buffer stride, not just the active area. */
109 return lines * fb->pitches[0];
110 }
111
112 static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
113 {
114 u32 fbc_ctl;
115
116 dev_priv->fbc.active = false;
117
118 /* Disable compression */
119 fbc_ctl = I915_READ(FBC_CONTROL);
120 if ((fbc_ctl & FBC_CTL_EN) == 0)
121 return;
122
123 fbc_ctl &= ~FBC_CTL_EN;
124 I915_WRITE(FBC_CONTROL, fbc_ctl);
125
126 /* Wait for compressing bit to clear */
127 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
128 DRM_DEBUG_KMS("FBC idle timed out\n");
129 return;
130 }
131
132 DRM_DEBUG_KMS("deactivated FBC\n");
133 }
134
135 static void i8xx_fbc_activate(struct intel_crtc *crtc)
136 {
137 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
138 struct drm_framebuffer *fb = crtc->base.primary->fb;
139 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
140 int cfb_pitch;
141 int i;
142 u32 fbc_ctl;
143
144 dev_priv->fbc.active = true;
145
146 /* Note: fbc.threshold == 1 for i8xx */
147 cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
148 if (fb->pitches[0] < cfb_pitch)
149 cfb_pitch = fb->pitches[0];
150
151 /* FBC_CTL wants 32B or 64B units */
152 if (IS_GEN2(dev_priv))
153 cfb_pitch = (cfb_pitch / 32) - 1;
154 else
155 cfb_pitch = (cfb_pitch / 64) - 1;
156
157 /* Clear old tags */
158 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
159 I915_WRITE(FBC_TAG(i), 0);
160
161 if (IS_GEN4(dev_priv)) {
162 u32 fbc_ctl2;
163
164 /* Set it up... */
165 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
166 fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
167 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
168 I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
169 }
170
171 /* enable it... */
172 fbc_ctl = I915_READ(FBC_CONTROL);
173 fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
174 fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
175 if (IS_I945GM(dev_priv))
176 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
177 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
178 fbc_ctl |= obj->fence_reg;
179 I915_WRITE(FBC_CONTROL, fbc_ctl);
180
181 DRM_DEBUG_KMS("activated FBC, pitch %d, yoff %d, plane %c\n",
182 cfb_pitch, crtc->base.y, plane_name(crtc->plane));
183 }
184
185 static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
186 {
187 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
188 }
189
190 static void g4x_fbc_activate(struct intel_crtc *crtc)
191 {
192 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
193 struct drm_framebuffer *fb = crtc->base.primary->fb;
194 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
195 u32 dpfc_ctl;
196
197 dev_priv->fbc.active = true;
198
199 dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
200 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
201 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
202 else
203 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
204 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
205
206 I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
207
208 /* enable it... */
209 I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
210
211 DRM_DEBUG_KMS("activated fbc on plane %c\n", plane_name(crtc->plane));
212 }
213
214 static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
215 {
216 u32 dpfc_ctl;
217
218 dev_priv->fbc.active = false;
219
220 /* Disable compression */
221 dpfc_ctl = I915_READ(DPFC_CONTROL);
222 if (dpfc_ctl & DPFC_CTL_EN) {
223 dpfc_ctl &= ~DPFC_CTL_EN;
224 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
225
226 DRM_DEBUG_KMS("deactivated FBC\n");
227 }
228 }
229
230 static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
231 {
232 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
233 }
234
235 /* This function forces a CFB recompression through the nuke operation. */
236 static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
237 {
238 I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
239 POSTING_READ(MSG_FBC_REND_STATE);
240 }
241
242 static void ilk_fbc_activate(struct intel_crtc *crtc)
243 {
244 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
245 struct drm_framebuffer *fb = crtc->base.primary->fb;
246 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
247 u32 dpfc_ctl;
248 int threshold = dev_priv->fbc.threshold;
249 unsigned int y_offset;
250
251 dev_priv->fbc.active = true;
252
253 dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
254 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
255 threshold++;
256
257 switch (threshold) {
258 case 4:
259 case 3:
260 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
261 break;
262 case 2:
263 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
264 break;
265 case 1:
266 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
267 break;
268 }
269 dpfc_ctl |= DPFC_CTL_FENCE_EN;
270 if (IS_GEN5(dev_priv))
271 dpfc_ctl |= obj->fence_reg;
272
273 y_offset = get_crtc_fence_y_offset(crtc);
274 I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
275 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
276 /* enable it... */
277 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
278
279 if (IS_GEN6(dev_priv)) {
280 I915_WRITE(SNB_DPFC_CTL_SA,
281 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
282 I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
283 }
284
285 intel_fbc_recompress(dev_priv);
286
287 DRM_DEBUG_KMS("activated fbc on plane %c\n", plane_name(crtc->plane));
288 }
289
290 static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
291 {
292 u32 dpfc_ctl;
293
294 dev_priv->fbc.active = false;
295
296 /* Disable compression */
297 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
298 if (dpfc_ctl & DPFC_CTL_EN) {
299 dpfc_ctl &= ~DPFC_CTL_EN;
300 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
301
302 DRM_DEBUG_KMS("deactivated FBC\n");
303 }
304 }
305
306 static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
307 {
308 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
309 }
310
311 static void gen7_fbc_activate(struct intel_crtc *crtc)
312 {
313 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
314 struct drm_framebuffer *fb = crtc->base.primary->fb;
315 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
316 u32 dpfc_ctl;
317 int threshold = dev_priv->fbc.threshold;
318
319 dev_priv->fbc.active = true;
320
321 dpfc_ctl = 0;
322 if (IS_IVYBRIDGE(dev_priv))
323 dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
324
325 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
326 threshold++;
327
328 switch (threshold) {
329 case 4:
330 case 3:
331 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
332 break;
333 case 2:
334 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
335 break;
336 case 1:
337 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
338 break;
339 }
340
341 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
342
343 if (dev_priv->fbc.false_color)
344 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
345
346 if (IS_IVYBRIDGE(dev_priv)) {
347 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
348 I915_WRITE(ILK_DISPLAY_CHICKEN1,
349 I915_READ(ILK_DISPLAY_CHICKEN1) |
350 ILK_FBCQ_DIS);
351 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
352 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
353 I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
354 I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
355 HSW_FBCQ_DIS);
356 }
357
358 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
359
360 I915_WRITE(SNB_DPFC_CTL_SA,
361 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
362 I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
363
364 intel_fbc_recompress(dev_priv);
365
366 DRM_DEBUG_KMS("activated fbc on plane %c\n", plane_name(crtc->plane));
367 }
368
369 /**
370 * intel_fbc_is_active - Is FBC active?
371 * @dev_priv: i915 device instance
372 *
373 * This function is used to verify the current state of FBC.
374 * FIXME: This should be tracked in the plane config eventually
375 * instead of queried at runtime for most callers.
376 */
377 bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
378 {
379 return dev_priv->fbc.active;
380 }
381
382 static void intel_fbc_activate(const struct drm_framebuffer *fb)
383 {
384 struct drm_i915_private *dev_priv = fb->dev->dev_private;
385 struct intel_crtc *crtc = dev_priv->fbc.crtc;
386
387 dev_priv->fbc.activate(crtc);
388
389 dev_priv->fbc.fb_id = fb->base.id;
390 dev_priv->fbc.y = crtc->base.y;
391 }
392
393 static void intel_fbc_work_fn(struct work_struct *__work)
394 {
395 struct intel_fbc_work *work =
396 container_of(to_delayed_work(__work),
397 struct intel_fbc_work, work);
398 struct drm_i915_private *dev_priv = work->fb->dev->dev_private;
399 struct drm_framebuffer *crtc_fb = dev_priv->fbc.crtc->base.primary->fb;
400
401 mutex_lock(&dev_priv->fbc.lock);
402 if (work == dev_priv->fbc.fbc_work) {
403 /* Double check that we haven't switched fb without cancelling
404 * the prior work.
405 */
406 if (crtc_fb == work->fb)
407 intel_fbc_activate(work->fb);
408
409 dev_priv->fbc.fbc_work = NULL;
410 }
411 mutex_unlock(&dev_priv->fbc.lock);
412
413 kfree(work);
414 }
415
416 static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
417 {
418 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
419
420 if (dev_priv->fbc.fbc_work == NULL)
421 return;
422
423 /* Synchronisation is provided by struct_mutex and checking of
424 * dev_priv->fbc.fbc_work, so we can perform the cancellation
425 * entirely asynchronously.
426 */
427 if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
428 /* tasklet was killed before being run, clean up */
429 kfree(dev_priv->fbc.fbc_work);
430
431 /* Mark the work as no longer wanted so that if it does
432 * wake up (because the work was already running and waiting
433 * for our mutex), it will discover that it is no longer
434 * necessary to run.
435 */
436 dev_priv->fbc.fbc_work = NULL;
437 }
438
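/*
 * Schedule the deferred hardware activation: allocate an intel_fbc_work and
 * run it 50ms later (see the comment below about waiting for a vblank). If
 * the allocation fails, fall back to activating FBC immediately.
 */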
439 static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
440 {
441 struct intel_fbc_work *work;
442 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
443
444 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
445
446 intel_fbc_cancel_work(dev_priv);
447
448 work = kzalloc(sizeof(*work), GFP_KERNEL);
449 if (work == NULL) {
450 DRM_ERROR("Failed to allocate FBC work structure\n");
451 intel_fbc_activate(crtc->base.primary->fb);
452 return;
453 }
454
455 work->fb = crtc->base.primary->fb;
456 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
457
458 dev_priv->fbc.fbc_work = work;
459
460 /* Delay the actual enabling to let pageflipping cease and the
461 * display settle before starting the compression. Note that
462 * this delay also serves a second purpose: it allows for a
463 * vblank to pass after disabling the FBC before we attempt
464 * to modify the control registers.
465 *
466 * A more complicated solution would involve tracking vblanks
467 * following the termination of the page-flipping sequence
468 * and indeed performing the enable as a co-routine and not
469 * waiting synchronously upon the vblank.
470 *
471 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
472 */
473 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
474 }
475
476 static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
477 {
478 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
479
480 intel_fbc_cancel_work(dev_priv);
481
482 if (dev_priv->fbc.active)
483 dev_priv->fbc.deactivate(dev_priv);
484 }
485
486 /**
487 * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
488 * @crtc: the CRTC
489 *
490 * This function deactivates FBC if it's associated with the provided CRTC.
491 */
492 void intel_fbc_deactivate(struct intel_crtc *crtc)
493 {
494 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
495
496 if (!fbc_supported(dev_priv))
497 return;
498
499 mutex_lock(&dev_priv->fbc.lock);
500 if (dev_priv->fbc.crtc == crtc)
501 __intel_fbc_deactivate(dev_priv);
502 mutex_unlock(&dev_priv->fbc.lock);
503 }
504
505 static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
506 const char *reason)
507 {
508 if (dev_priv->fbc.no_fbc_reason == reason)
509 return;
510
511 dev_priv->fbc.no_fbc_reason = reason;
512 DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
513 }
514
515 static bool crtc_can_fbc(struct intel_crtc *crtc)
516 {
517 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
518
519 if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
520 return false;
521
522 if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
523 return false;
524
525 return true;
526 }
527
528 static bool crtc_is_valid(struct intel_crtc *crtc)
529 {
530 if (!intel_crtc_active(&crtc->base))
531 return false;
532
533 if (!to_intel_plane_state(crtc->base.primary->state)->visible)
534 return false;
535
536 return true;
537 }
538
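/*
 * On gen4 and older platforms FBC can only be used while at most one pipe is
 * active and visible; newer platforms are not restricted by this check.
 */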
539 static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
540 {
541 enum pipe pipe;
542 int n_pipes = 0;
543 struct drm_crtc *crtc;
544
545 if (INTEL_INFO(dev_priv)->gen > 4)
546 return true;
547
548 for_each_pipe(dev_priv, pipe) {
549 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
550
551 if (intel_crtc_active(crtc) &&
552 to_intel_plane_state(crtc->primary->state)->visible)
553 n_pipes++;
554 }
555
556 return (n_pipes < 2);
557 }
558
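/*
 * Reserve a node in stolen memory for the CFB. We first try to over-allocate
 * (twice the required size). On failure we retry with smaller allocations,
 * raising the compression threshold accordingly, and give up once the
 * hardware limit on the threshold is exceeded. Returns the threshold that
 * was used, or 0 if no allocation succeeded.
 */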
559 static int find_compression_threshold(struct drm_i915_private *dev_priv,
560 struct drm_mm_node *node,
561 int size,
562 int fb_cpp)
563 {
564 int compression_threshold = 1;
565 int ret;
566 u64 end;
567
568 /* The FBC hardware for BDW/SKL doesn't have access to the stolen
569 * reserved range size, so it always assumes the maximum (8MB) is used.
570 * If we enable FBC using a CFB on that memory range we'll get FIFO
571 * underruns, even if that range is not reserved by the BIOS. */
572 if (IS_BROADWELL(dev_priv) ||
573 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
574 end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
575 else
576 end = dev_priv->gtt.stolen_usable_size;
577
578 /* HACK: This code depends on what we will do in *_enable_fbc. If that
579 * code changes, this code needs to change as well.
580 *
581 * The enable_fbc code will attempt to use one of our 2 compression
582 * thresholds, so in that case we only have one fallback left.
583 */
584
585 /* Try to over-allocate to reduce reallocations and fragmentation. */
586 ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
587 4096, 0, end);
588 if (ret == 0)
589 return compression_threshold;
590
591 again:
592 /* HW's ability to limit the CFB is 1:4 */
593 if (compression_threshold > 4 ||
594 (fb_cpp == 2 && compression_threshold == 2))
595 return 0;
596
597 ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
598 4096, 0, end);
599 if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
600 return 0;
601 } else if (ret) {
602 compression_threshold <<= 1;
603 goto again;
604 } else {
605 return compression_threshold;
606 }
607 }
608
609 static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
610 {
611 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
612 struct drm_framebuffer *fb = crtc->base.primary->state->fb;
613 struct drm_mm_node *uninitialized_var(compressed_llb);
614 int size, fb_cpp, ret;
615
616 WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
617
618 size = intel_fbc_calculate_cfb_size(crtc, fb);
619 fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
620
621 ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
622 size, fb_cpp);
623 if (!ret)
624 goto err_llb;
625 else if (ret > 1) {
626 DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
627
628 }
629
630 dev_priv->fbc.threshold = ret;
631
632 if (INTEL_INFO(dev_priv)->gen >= 5)
633 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
634 else if (IS_GM45(dev_priv)) {
635 I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
636 } else {
637 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
638 if (!compressed_llb)
639 goto err_fb;
640
641 ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
642 4096, 4096);
643 if (ret)
644 goto err_fb;
645
646 dev_priv->fbc.compressed_llb = compressed_llb;
647
648 I915_WRITE(FBC_CFB_BASE,
649 dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
650 I915_WRITE(FBC_LL_BASE,
651 dev_priv->mm.stolen_base + compressed_llb->start);
652 }
653
654 dev_priv->fbc.uncompressed_size = size;
655
656 DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
657 dev_priv->fbc.compressed_fb.size,
658 dev_priv->fbc.threshold);
659
660 return 0;
661
662 err_fb:
663 kfree(compressed_llb);
664 i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
665 err_llb:
666 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
667 return -ENOSPC;
668 }
669
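/*
 * Release the stolen memory reserved for the CFB and, on platforms that use
 * one, the separate line length buffer.
 */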
670 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
671 {
672 if (dev_priv->fbc.uncompressed_size == 0)
673 return;
674
675 i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
676
677 if (dev_priv->fbc.compressed_llb) {
678 i915_gem_stolen_remove_node(dev_priv,
679 dev_priv->fbc.compressed_llb);
680 kfree(dev_priv->fbc.compressed_llb);
681 }
682
683 dev_priv->fbc.uncompressed_size = 0;
684 }
685
686 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
687 {
688 if (!fbc_supported(dev_priv))
689 return;
690
691 mutex_lock(&dev_priv->fbc.lock);
692 __intel_fbc_cleanup_cfb(dev_priv);
693 mutex_unlock(&dev_priv->fbc.lock);
694 }
695
696 static bool stride_is_valid(struct drm_i915_private *dev_priv,
697 unsigned int stride)
698 {
699 /* These should have been caught earlier. */
700 WARN_ON(stride < 512);
701 WARN_ON((stride & (64 - 1)) != 0);
702
703 /* Below are the additional FBC restrictions. */
704
705 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
706 return stride == 4096 || stride == 8192;
707
708 if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
709 return false;
710
711 if (stride > 16384)
712 return false;
713
714 return true;
715 }
716
717 static bool pixel_format_is_valid(struct drm_framebuffer *fb)
718 {
719 struct drm_device *dev = fb->dev;
720 struct drm_i915_private *dev_priv = dev->dev_private;
721
722 switch (fb->pixel_format) {
723 case DRM_FORMAT_XRGB8888:
724 case DRM_FORMAT_XBGR8888:
725 return true;
726 case DRM_FORMAT_XRGB1555:
727 case DRM_FORMAT_RGB565:
728 /* 16bpp not supported on gen2 */
729 if (IS_GEN2(dev))
730 return false;
731 /* WaFbcOnly1to1Ratio:ctg */
732 if (IS_G4X(dev_priv))
733 return false;
734 return true;
735 default:
736 return false;
737 }
738 }
739
740 /*
741 * For some reason, the hardware tracking starts looking at whatever we
742 * programmed as the display plane base address register. It does not look at
743 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
744 * variables instead of just looking at the pipe/plane size.
745 */
746 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
747 {
748 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
749 unsigned int effective_w, effective_h, max_w, max_h;
750
751 if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
752 max_w = 4096;
753 max_h = 4096;
754 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
755 max_w = 4096;
756 max_h = 2048;
757 } else {
758 max_w = 2048;
759 max_h = 1536;
760 }
761
762 intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
763 effective_w += crtc->adjusted_x;
764 effective_h += crtc->adjusted_y;
765
766 return effective_w <= max_w && effective_h <= max_h;
767 }
768
769 /**
770 * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
771 * @crtc: the CRTC that triggered the update
772 *
773 * This function completely reevaluates the status of FBC, then activates,
774 * deactivates or keeps it in the same state.
775 */
776 static void __intel_fbc_update(struct intel_crtc *crtc)
777 {
778 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
779 struct drm_framebuffer *fb;
780 struct drm_i915_gem_object *obj;
781 const struct drm_display_mode *adjusted_mode;
782
783 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
784
785 if (!multiple_pipes_ok(dev_priv)) {
786 set_no_fbc_reason(dev_priv, "more than one pipe active");
787 goto out_disable;
788 }
789
790 if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
791 return;
792
793 if (!crtc_is_valid(crtc)) {
794 set_no_fbc_reason(dev_priv, "no output");
795 goto out_disable;
796 }
797
798 fb = crtc->base.primary->fb;
799 obj = intel_fb_obj(fb);
800 adjusted_mode = &crtc->config->base.adjusted_mode;
801
802 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
803 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
804 set_no_fbc_reason(dev_priv, "incompatible mode");
805 goto out_disable;
806 }
807
808 if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
809 set_no_fbc_reason(dev_priv, "mode too large for compression");
810 goto out_disable;
811 }
812
813 /* The use of a CPU fence is mandatory in order to detect writes
814 * by the CPU to the scanout and trigger updates to the FBC.
815 */
816 if (obj->tiling_mode != I915_TILING_X ||
817 obj->fence_reg == I915_FENCE_REG_NONE) {
818 set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
819 goto out_disable;
820 }
821 if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
822 crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
823 set_no_fbc_reason(dev_priv, "rotation unsupported");
824 goto out_disable;
825 }
826
827 if (!stride_is_valid(dev_priv, fb->pitches[0])) {
828 set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
829 goto out_disable;
830 }
831
832 if (!pixel_format_is_valid(fb)) {
833 set_no_fbc_reason(dev_priv, "pixel format is invalid");
834 goto out_disable;
835 }
836
837 /* WaFbcExceedCdClockThreshold:hsw,bdw */
838 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
839 ilk_pipe_pixel_rate(crtc->config) >=
840 dev_priv->cdclk_freq * 95 / 100) {
841 set_no_fbc_reason(dev_priv, "pixel rate is too big");
842 goto out_disable;
843 }
844
845 /* It is possible for the required CFB size to change without a
846 * crtc->disable + crtc->enable since it is possible to change the
847 * stride without triggering a full modeset. Since we try to
848 * over-allocate the CFB, there's a chance we may keep FBC enabled even
849 * if this happens, but if we exceed the current CFB size we'll have to
850 * disable FBC. Notice that it would be possible to disable FBC, wait
851 * for a frame, free the stolen node, then try to reenable FBC in case
852 * we didn't get any invalidate/deactivate calls, but this would require
853 * a lot of tracking just for a specific case. If we conclude it's an
854 * important case, we can implement it later. */
855 if (intel_fbc_calculate_cfb_size(crtc, fb) >
856 dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
857 set_no_fbc_reason(dev_priv, "CFB requirements changed");
858 goto out_disable;
859 }
860
861 /* If the scanout has not changed, don't modify the FBC settings.
862 * Note that we make the fundamental assumption that the fb->obj
863 * cannot be unpinned (and have its GTT offset and fence revoked)
864 * without first being decoupled from the scanout and FBC disabled.
865 */
866 if (dev_priv->fbc.crtc == crtc &&
867 dev_priv->fbc.fb_id == fb->base.id &&
868 dev_priv->fbc.y == crtc->base.y &&
869 dev_priv->fbc.active)
870 return;
871
872 if (intel_fbc_is_active(dev_priv)) {
873 /* We update FBC along two paths, after changing fb/crtc
874 * configuration (modeswitching) and after page-flipping
875 * finishes. For the latter, we know that not only did
876 * we disable the FBC at the start of the page-flip
877 * sequence, but also more than one vblank has passed.
878 *
879 * For the former case of modeswitching, it is possible
880 * to switch between two FBC valid configurations
881 * instantaneously so we do need to disable the FBC
882 * before we can modify its control registers. We also
883 * have to wait for the next vblank for that to take
884 * effect. However, since we delay enabling FBC we can
885 * assume that a vblank has passed since disabling and
886 * that we can safely alter the registers in the deferred
887 * callback.
888 *
889 * In the scenario that we go from a valid to invalid
890 * and then back to valid FBC configuration we have
891 * no strict enforcement that a vblank occurred since
892 * disabling the FBC. However, along all current pipe
893 * disabling paths we do need to wait for a vblank at
894 * some point. And we wait before enabling FBC anyway.
895 */
896 DRM_DEBUG_KMS("deactivating FBC for update\n");
897 __intel_fbc_deactivate(dev_priv);
898 }
899
900 intel_fbc_schedule_activation(crtc);
901 dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
902 return;
903
904 out_disable:
905 /* Multiple disables should be harmless */
906 if (intel_fbc_is_active(dev_priv)) {
907 DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
908 __intel_fbc_deactivate(dev_priv);
909 }
910 }
911
912 /**
913 * intel_fbc_update - activate/deactivate FBC as needed
914 * @crtc: the CRTC that triggered the update
915 *
916 * This function reevaluates the overall state and activates or deactivates FBC.
917 */
918 void intel_fbc_update(struct intel_crtc *crtc)
919 {
920 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
921
922 if (!fbc_supported(dev_priv))
923 return;
924
925 mutex_lock(&dev_priv->fbc.lock);
926 __intel_fbc_update(crtc);
927 mutex_unlock(&dev_priv->fbc.lock);
928 }
929
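/**
 * intel_fbc_invalidate - deactivate FBC when frontbuffer rendering starts
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: frontbuffer plane tracking bits being invalidated
 * @origin: which operation caused the invalidation
 *
 * This records the frontbuffer bits that are relevant to FBC as busy and
 * deactivates FBC while any of them remain busy. Invalidations coming from
 * ORIGIN_GTT are ignored.
 */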
930 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
931 unsigned int frontbuffer_bits,
932 enum fb_op_origin origin)
933 {
934 unsigned int fbc_bits;
935
936 if (!fbc_supported(dev_priv))
937 return;
938
939 if (origin == ORIGIN_GTT)
940 return;
941
942 mutex_lock(&dev_priv->fbc.lock);
943
944 if (dev_priv->fbc.enabled)
945 fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
946 else
947 fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
948
949 dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
950
951 if (dev_priv->fbc.busy_bits)
952 __intel_fbc_deactivate(dev_priv);
953
954 mutex_unlock(&dev_priv->fbc.lock);
955 }
956
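/**
 * intel_fbc_flush - handle the end of frontbuffer rendering
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: frontbuffer plane tracking bits being flushed
 * @origin: which operation caused the flush
 *
 * This clears the given busy bits and, once no relevant bits remain busy,
 * deactivates FBC and reevaluates it for the current CRTC. Flushes coming
 * from ORIGIN_GTT are ignored, matching intel_fbc_invalidate().
 */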
957 void intel_fbc_flush(struct drm_i915_private *dev_priv,
958 unsigned int frontbuffer_bits, enum fb_op_origin origin)
959 {
960 if (!fbc_supported(dev_priv))
961 return;
962
963 if (origin == ORIGIN_GTT)
964 return;
965
966 mutex_lock(&dev_priv->fbc.lock);
967
968 dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
969
970 if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
971 __intel_fbc_deactivate(dev_priv);
972 __intel_fbc_update(dev_priv->fbc.crtc);
973 }
974
975 mutex_unlock(&dev_priv->fbc.lock);
976 }
977
978 /**
979 * intel_fbc_enable - tries to enable FBC on the CRTC
980 * @crtc: the CRTC
981 *
982 * This function checks if it's possible to enable FBC on the given CRTC,
983 * then enables it. Notice that it doesn't activate FBC.
984 */
985 void intel_fbc_enable(struct intel_crtc *crtc)
986 {
987 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
988
989 if (!fbc_supported(dev_priv))
990 return;
991
992 mutex_lock(&dev_priv->fbc.lock);
993
994 if (dev_priv->fbc.enabled) {
995 WARN_ON(dev_priv->fbc.crtc == crtc);
996 goto out;
997 }
998
999 WARN_ON(dev_priv->fbc.active);
1000 WARN_ON(dev_priv->fbc.crtc != NULL);
1001
1002 if (intel_vgpu_active(dev_priv->dev)) {
1003 set_no_fbc_reason(dev_priv, "VGPU is active");
1004 goto out;
1005 }
1006
1007 if (i915.enable_fbc < 0) {
1008 set_no_fbc_reason(dev_priv, "disabled per chip default");
1009 goto out;
1010 }
1011
1012 if (!i915.enable_fbc) {
1013 set_no_fbc_reason(dev_priv, "disabled per module param");
1014 goto out;
1015 }
1016
1017 if (!crtc_can_fbc(crtc)) {
1018 set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
1019 goto out;
1020 }
1021
1022 if (intel_fbc_alloc_cfb(crtc)) {
1023 set_no_fbc_reason(dev_priv, "not enough stolen memory");
1024 goto out;
1025 }
1026
1027 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1028 dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet";
1029
1030 dev_priv->fbc.enabled = true;
1031 dev_priv->fbc.crtc = crtc;
1032 out:
1033 mutex_unlock(&dev_priv->fbc.lock);
1034 }
1035
1036 /**
1037 * __intel_fbc_disable - disable FBC
1038 * @dev_priv: i915 device instance
1039 *
1040 * This is the low level function that actually disables FBC. Callers should
1041 * grab the FBC lock.
1042 */
1043 static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
1044 {
1045 struct intel_crtc *crtc = dev_priv->fbc.crtc;
1046
1047 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
1048 WARN_ON(!dev_priv->fbc.enabled);
1049 WARN_ON(dev_priv->fbc.active);
1050 assert_pipe_disabled(dev_priv, crtc->pipe);
1051
1052 DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1053
1054 __intel_fbc_cleanup_cfb(dev_priv);
1055
1056 dev_priv->fbc.enabled = false;
1057 dev_priv->fbc.crtc = NULL;
1058 }
1059
1060 /**
1061 * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
1062 * @crtc: the CRTC
1063 *
1064 * This function disables FBC if it's associated with the provided CRTC.
1065 */
1066 void intel_fbc_disable_crtc(struct intel_crtc *crtc)
1067 {
1068 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1069
1070 if (!fbc_supported(dev_priv))
1071 return;
1072
1073 mutex_lock(&dev_priv->fbc.lock);
1074 if (dev_priv->fbc.crtc == crtc) {
1075 WARN_ON(!dev_priv->fbc.enabled);
1076 WARN_ON(dev_priv->fbc.active);
1077 __intel_fbc_disable(dev_priv);
1078 }
1079 mutex_unlock(&dev_priv->fbc.lock);
1080 }
1081
1082 /**
1083 * intel_fbc_disable - globally disable FBC
1084 * @dev_priv: i915 device instance
1085 *
1086 * This function disables FBC regardless of which CRTC is associated with it.
1087 */
1088 void intel_fbc_disable(struct drm_i915_private *dev_priv)
1089 {
1090 if (!fbc_supported(dev_priv))
1091 return;
1092
1093 mutex_lock(&dev_priv->fbc.lock);
1094 if (dev_priv->fbc.enabled)
1095 __intel_fbc_disable(dev_priv);
1096 mutex_unlock(&dev_priv->fbc.lock);
1097 }
1098
1099 /**
1100 * intel_fbc_init - Initialize FBC
1101 * @dev_priv: the i915 device
1102 *
1103 * This function might be called during the PM init process.
1104 */
1105 void intel_fbc_init(struct drm_i915_private *dev_priv)
1106 {
1107 enum pipe pipe;
1108
1109 mutex_init(&dev_priv->fbc.lock);
1110 dev_priv->fbc.enabled = false;
1111 dev_priv->fbc.active = false;
1112
1113 if (!HAS_FBC(dev_priv)) {
1114 dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
1115 return;
1116 }
1117
1118 for_each_pipe(dev_priv, pipe) {
1119 dev_priv->fbc.possible_framebuffer_bits |=
1120 INTEL_FRONTBUFFER_PRIMARY(pipe);
1121
1122 if (fbc_on_pipe_a_only(dev_priv))
1123 break;
1124 }
1125
1126 if (INTEL_INFO(dev_priv)->gen >= 7) {
1127 dev_priv->fbc.is_active = ilk_fbc_is_active;
1128 dev_priv->fbc.activate = gen7_fbc_activate;
1129 dev_priv->fbc.deactivate = ilk_fbc_deactivate;
1130 } else if (INTEL_INFO(dev_priv)->gen >= 5) {
1131 dev_priv->fbc.is_active = ilk_fbc_is_active;
1132 dev_priv->fbc.activate = ilk_fbc_activate;
1133 dev_priv->fbc.deactivate = ilk_fbc_deactivate;
1134 } else if (IS_GM45(dev_priv)) {
1135 dev_priv->fbc.is_active = g4x_fbc_is_active;
1136 dev_priv->fbc.activate = g4x_fbc_activate;
1137 dev_priv->fbc.deactivate = g4x_fbc_deactivate;
1138 } else {
1139 dev_priv->fbc.is_active = i8xx_fbc_is_active;
1140 dev_priv->fbc.activate = i8xx_fbc_activate;
1141 dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
1142
1143 /* This value was pulled out of someone's hat */
1144 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
1145 }
1146
1147 /* We still don't have any sort of hardware state readout for FBC, so
1148 * deactivate it in case the BIOS activated it to make sure software
1149 * matches the hardware state. */
1150 if (dev_priv->fbc.is_active(dev_priv))
1151 dev_priv->fbc.deactivate(dev_priv);
1152 }