/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by the means of the
 * i915.i915_enable_fbc parameter
 */
void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
						  int is_ddr3,
						  int fsb,
						  int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE, G4X_MAX_WM, G4X_MAX_WM, 2, G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM, 2, G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE, VALLEYVIEW_MAX_WM, VALLEYVIEW_MAX_WM, 2, G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO, VALLEYVIEW_CURSOR_MAX_WM, I965_CURSOR_DFT_WM, 2, G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM, 2, I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE, I915_MAX_WM, 1, 2, I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE, I915_MAX_WM, 1, 2, I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO, ILK_DISPLAY_MAXWM, ILK_DISPLAY_DFTWM, 2, ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO, ILK_CURSOR_MAXWM, ILK_CURSOR_DFTWM, 2, ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO, ILK_DISPLAY_MAX_SRWM, ILK_DISPLAY_DFT_SRWM, 2, ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO, ILK_CURSOR_MAX_SRWM, ILK_CURSOR_DFT_SRWM, 2, ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO, SNB_DISPLAY_MAXWM, SNB_DISPLAY_DFTWM, 2, SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO, SNB_CURSOR_MAXWM, SNB_CURSOR_DFTWM, 2, SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO, SNB_DISPLAY_MAX_SRWM, SNB_DISPLAY_DFT_SRWM, 2, SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO, SNB_CURSOR_MAX_SRWM, SNB_CURSOR_DFT_SRWM, 2, SNB_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
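/*
 * Worked example of the calculation above (numbers invented for
 * illustration, not values from any platform table): a 100,000 kHz dot
 * clock at 4 bytes per pixel with 5,000 ns of latency drains
 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes while a fetch is
 * outstanding.  With a 64-byte cacheline that is 32 entries, so a 96-entry
 * FIFO with a guard size of 2 yields a watermark of 96 - (32 + 2) = 62,
 * subject to the max_wm/default_wm clamping above.
 */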
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled)
		return false;

	clock = crtc->mode.clock;	/* VESA DOT Clock */
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
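/*
 * Example of the drain-latency math above (invented 148,500 kHz dot clock,
 * 4 bytes per pixel): entries = (148500 / 1000) * 4 = 592, which is above
 * 256, so the 32x precision multiplier is selected and
 * plane_dl = (64 * 32 * 4) / ((148500 / 1000) * 4) = 8192 / 592 = 13.
 */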
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}
#define single_plane_enabled(mask) is_power_of_2(mask)
void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	else
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}
void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has its own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
/*
 * Compute watermark values for WM[1-3].
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
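/*
 * Example of the FBC watermark rule quoted above (numbers invented for
 * illustration): with a primary watermark of 77 and a line size of
 * 1600 * 4 = 6400 bytes, fbc_wm = DIV_ROUND_UP(77 * 64, 6400) + 2
 * = 1 + 2 = 3.
 */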
void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/* IVB has 3 pipes */
	if (IS_IVYBRIDGE(dev) &&
	    g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 3;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}

	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm > 0x3ff ? false : true;
}
void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
				  uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}
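/*
 * Worked instance of the SR formula in the comment above (invented mode):
 * with 12 us latency, htotal 2000 and a 100,000 kHz dot clock, the line
 * time is 20 us, so (trunc(12 / 20) + 1) = 1 line of 1600 * 4 = 6400 bytes
 * is the raw SR watermark before rounding up to FIFO lines and adding the
 * 2-entry guard.
 */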
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_object_pin(ctx, 4096, true);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	mutex_unlock(&dev->struct_mutex);
	return NULL;
}
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
*dev
)
2043 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2044 u32 rgvmodectl
= I915_READ(MEMMODECTL
);
2045 u8 fmax
, fmin
, fstart
, vstart
;
2047 /* Enable temp reporting */
2048 I915_WRITE16(PMMISC
, I915_READ(PMMISC
) | MCPPCE_EN
);
2049 I915_WRITE16(TSC1
, I915_READ(TSC1
) | TSE
);
2051 /* 100ms RC evaluation intervals */
2052 I915_WRITE(RCUPEI
, 100000);
2053 I915_WRITE(RCDNEI
, 100000);
2055 /* Set max/min thresholds to 90ms and 80ms respectively */
2056 I915_WRITE(RCBMAXAVG
, 90000);
2057 I915_WRITE(RCBMINAVG
, 80000);
2059 I915_WRITE(MEMIHYST
, 1);
2061 /* Set up min, max, and cur for interrupt handling */
2062 fmax
= (rgvmodectl
& MEMMODE_FMAX_MASK
) >> MEMMODE_FMAX_SHIFT
;
2063 fmin
= (rgvmodectl
& MEMMODE_FMIN_MASK
);
2064 fstart
= (rgvmodectl
& MEMMODE_FSTART_MASK
) >>
2065 MEMMODE_FSTART_SHIFT
;
2067 vstart
= (I915_READ(PXVFREQ_BASE
+ (fstart
* 4)) & PXVFREQ_PX_MASK
) >>
2070 dev_priv
->fmax
= fmax
; /* IPS callback will increase this */
2071 dev_priv
->fstart
= fstart
;
2073 dev_priv
->max_delay
= fstart
;
2074 dev_priv
->min_delay
= fmin
;
2075 dev_priv
->cur_delay
= fstart
;
2077 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2078 fmax
, fmin
, fstart
);
2080 I915_WRITE(MEMINTREN
, MEMINT_CX_SUPR_EN
| MEMINT_EVAL_CHG_EN
);
2083 * Interrupts will be enabled in ironlake_irq_postinstall
2086 I915_WRITE(VIDSTART
, vstart
);
2087 POSTING_READ(VIDSTART
);
2089 rgvmodectl
|= MEMMODE_SWMODE_EN
;
2090 I915_WRITE(MEMMODECTL
, rgvmodectl
);
2092 if (wait_for((I915_READ(MEMSWCTL
) & MEMCTL_CMD_STS
) == 0, 10))
2093 DRM_ERROR("stuck trying to change perf mode\n");
2096 ironlake_set_drps(dev
, fstart
);
2098 dev_priv
->last_count1
= I915_READ(0x112e4) + I915_READ(0x112e8) +
2100 dev_priv
->last_time1
= jiffies_to_msecs(jiffies
);
2101 dev_priv
->last_count2
= I915_READ(0x112f4);
2102 getrawmonotonic(&dev_priv
->last_time2
);
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
}

void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 swreq;

	swreq = (val & 0x3ff) << 25;
	I915_WRITE(GEN6_RPNSWREQ, swreq);
}

void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}

int intel_enable_rc6(const struct drm_device *dev)
{
	/*
	 * Respect the kernel parameter if it is set
	 */
	if (i915_enable_rc6 >= 0)
		return i915_enable_rc6;

	/*
	 * Disable RC6 on Ironlake
	 */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	/* Sorry Haswell, no RC6 for you for now. */
	if (IS_HASWELL(dev))
		return 0;

	/*
	 * Disable rc6 on Sandybridge
	 */
	if (INTEL_INFO(dev)->gen == 6) {
		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
		return INTEL_RC6_ENABLE;
	}
	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}

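/*
 * Illustrative summary of the policy above: with the default
 * i915_enable_rc6 = -1 the driver picks 0 (RC6 off) on Ironlake and Haswell,
 * INTEL_RC6_ENABLE alone on Sandybridge, and INTEL_RC6_ENABLE |
 * INTEL_RC6p_ENABLE on everything else; any non-negative module parameter
 * value is returned unchanged as the mask to use.
 */
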
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int cur_freq, min_freq, max_freq;
	int rc6_mode;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
		 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
		 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}

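/*
 * Illustrative decode of the capability registers read above (made-up
 * values): an RP_STATE_CAP of 0x00060011 yields min_freq = 0x06 and
 * max_freq = 0x11, and a GT_PERF_STATUS of 0x00000800 yields
 * cur_freq = 0x08.  These raw ratios are what end up in
 * dev_priv->{min,max,cur}_delay; the overclocking path above may raise
 * max_delay if the PCU reports a higher supported ratio.
 */
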
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

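/*
 * Illustrative walk through the table write above (made-up numbers): with
 * max_ia_freq = 3200 MHz, scaling_factor = 180 and a GPU ratio 4 steps below
 * max_delay, ia_freq = 3200 - (4 * 180) / 2 = 2840, which
 * DIV_ROUND_CLOSEST(ia_freq, 100) turns into 28 before it is shifted into
 * GEN6_PCODE_DATA alongside the GPU ratio for that table entry.
 */
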
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}

void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}

static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx == NULL)
		dev_priv->renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->renderctx)
		return -ENOMEM;

	if (dev_priv->pwrctx == NULL)
		dev_priv->pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}

void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

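/*
 * Illustrative decode (made-up register value): a PXVFREQ entry with
 * div = 16, post = 1 and pre = 1 gives (16 * 133333) / ((1 << 1) * 1) =
 * 1066664, i.e. roughly 1.07 GHz if the 133333 reference is read as kHz.
 */
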
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}

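/*
 * Illustrative P-state weight computation (made-up vid/freq values): with
 * vid = 50 and freq = 400000, val = 50 * 50 * (400000 / 1000) * 255 /
 * (127 * 127 * 900) = 255000000 / 14516100, which truncates to 17 and is
 * stored as that entry's PXW byte.
 */
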
void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}

void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* clear masked bit */
	I915_WRITE(CACHE_MODE_0,
		   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Bspec says we need to always set all mask bits. */
	I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
		   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);
}

void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) |
		   (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) |
		   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}

void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}

void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}

void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}

void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);
}

void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}