/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
#define INTEL_RC6_ENABLE			(1 << 0)
#define INTEL_RC6p_ENABLE			(1 << 1)
#define INTEL_RC6pp_ENABLE			(1 << 2)

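/*
 * Illustrative example (not from the original code): a sanitized RC6 mode of
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) permits the normal and deep RC6
 * states but keeps the GPU out of RC6pp.
 */
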
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */

static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * WaDisableSDEUnitClockGating:skl
	 * This seems to be a pre-production w/a.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
	 * This is a pre-production w/a.
	 */
	I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
		   I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
		   ~GEN9_DG_MIRROR_FIX_ENABLE);

	/* Wa4x4STCOptimizationDisable:skl */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}

static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = true;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];
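
	/*
	 * Worked example (illustrative numbers, not from the code): with a
	 * clamped cfb_pitch of 2048 bytes, the conversion below yields
	 * (2048 / 32) - 1 = 63 on gen2 and (2048 / 64) - 1 = 31 elsewhere,
	 * i.e. the pitch is programmed in 32B or 64B units, minus one.
	 */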

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this param in other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
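
	/*
	 * Explanatory note (an assumption inferred from the write pattern
	 * below, not documented here): ECOSKPD appears to carry per-bit lock
	 * bits in the upper half of the register, so the sequence is: unlock
	 * the FBC_NOTIFY bit, set it, re-lock it, then flush with a posting
	 * read.
	 */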

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->fbc.enabled;
}

void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN8(dev))
		return;

	if (!intel_fbc_enabled(dev))
		return;

	I915_WRITE(MSG_FBC_REND_STATE, value);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}

static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

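/*
 * Each row below fills a struct cxsr_latency:
 * { is_desktop, is_ddr3, fsb_freq, mem_freq,
 *   display_sr, cursor_sr, display_hpll_disable, cursor_hpll_disable },
 * with frequencies in MHz and latencies in ns, matching the fields looked
 * up in intel_get_cxsr_latency() and consumed by pineview_update_wm() below.
 */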
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

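/*
 * Worked example for intel_calculate_wm() (illustrative numbers, not from
 * the code): a 148500 kHz pixel clock at 4 bytes per pixel with a 5000 ns
 * latency drains 148 * 4 * 5000 / 1000 = 2960 bytes while a fetch is
 * outstanding, i.e. DIV_ROUND_UP(2960, 64) = 47 cachelines on a 64-byte
 * cacheline platform; with a 96-entry FIFO and a guard size of 2 the
 * watermark would be 96 - (47 + 2) = 47.
 */
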
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

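/*
 * Explanatory note (an interpretation, not from the original comments): the
 * tlb_miss term above appears to pad the entry estimate when the displayed
 * width is small relative to the FIFO, leaving slack for the extra fetch
 * latency a TLB miss can add; it is only applied when positive.
 */
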
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	struct drm_device *dev = crtc->dev;
	int entries;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	if (IS_CHERRYVIEW(dev))
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
					       DRAIN_LATENCY_PRECISION_16;
	else
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
					       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}

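/*
 * Worked example for vlv_compute_drain_latency() (illustrative numbers, not
 * from the code): a 148500 kHz clock at 4 bytes per pixel gives
 * entries = 149 * 4 = 596; that is > 128, so on non-CHV hardware the
 * precision multiplier is 64 and the drain latency becomes
 * (64 * 64 * 4) / 596 = 27, clamped to DRAIN_LATENCY_MASK if larger.
 */
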
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
			     DDL_PLANE_PRECISION_HIGH :
			     DDL_PLANE_PRECISION_LOW;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
			     DDL_CURSOR_PRECISION_HIGH :
			     DDL_CURSOR_PRECISION_LOW;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}

#define single_plane_enabled(mask) is_power_of_2(mask)

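/*
 * Note (added for clarity): is_power_of_2() is true exactly when a single
 * bit of the mask is set, i.e. exactly one pipe is enabled, which is the
 * only configuration where the self-refresh watermarks below apply.
 */
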
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
			     DDL_SPRITE_PRECISION_HIGH(sprite) :
			     DDL_SPRITE_PRECISION_LOW(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

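/*
 * Worked example for the pfit adjustment above (illustrative numbers, not
 * from the code): downscaling a 1920x1080 source to a 1280x1024 panel
 * multiplies the pixel rate by (1920 * 1080) / (1280 * 1024), roughly
 * 1.58x, while plain upscaling leaves it unchanged because pipe_w/pipe_h
 * are first clamped up to the panel fitter size.
 */
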
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}

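/*
 * Explanatory note (added for clarity): method1 above sizes the watermark
 * from the bytes drained during the latency window (the "small buffer"
 * model), while method2 rounds the latency up to whole scanlines and
 * charges full lines of horiz_pixels * bytes_per_pixel (the "large buffer"
 * model); the ilk_compute_*_wm() callers below take the minimum of the two.
 */
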
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}

1963 struct skl_pipe_wm_parameters
{
1965 uint32_t pipe_htotal
;
1966 uint32_t pixel_rate
; /* in KHz */
1967 struct intel_plane_wm_parameters plane
[I915_MAX_PLANES
];
1968 struct intel_plane_wm_parameters cursor
;
1971 struct ilk_pipe_wm_parameters
{
1973 uint32_t pipe_htotal
;
1974 uint32_t pixel_rate
;
1975 struct intel_plane_wm_parameters pri
;
1976 struct intel_plane_wm_parameters spr
;
1977 struct intel_plane_wm_parameters cur
;
1980 struct ilk_wm_maximums
{
1987 /* used in computing the new watermarks state */
1988 struct intel_wm_config
{
1989 unsigned int num_pipes_active
;
1990 bool sprites_enabled
;
1991 bool sprites_scaled
;
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
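/*
 * Sanity check of the split arithmetic above (illustrative): on IVB
 * (fifo_size 768, single active pipe, sprites enabled, level > 0) a 5/6
 * partitioning gives the primary plane 768 / 6 = 128 blocks and the
 * sprite 768 * 5 / 6 = 640 blocks, while the default 1:1 split gives
 * each 768 / 2 = 384 blocks.
 */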
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The WM are computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
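/*
 * Example with assumed mode timings (illustrative only): htotal = 2200 at
 * crtc_clock = 148500 kHz gives
 * linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119,
 * i.e. ~14.8 us per line in the register's 1/8 us units.
 */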
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - W0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >=1, punit returns 0us latency when they are
		 *   disabled, so we respect that and don't add 2us then.
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
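/*
 * Example of the gen9 sanitization above (illustrative values): if the
 * punit reports latencies {2, 4, 8, 0, 6, 0, 0, 0}, the result is
 * {4, 6, 10, 0, 0, 0, 0, 0}: 2us of read latency is added to every
 * enabled level, and level 4, although reported as 6us, is zeroed out
 * because level 3 already reads 0.
 */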
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_GEN9(dev))
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
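/*
 * Example (illustrative): with min = 12 (1.2 us in WM0's 0.1 us units),
 * WM0 is raised to at least 12, while WM1+ are raised to at least
 * DIV_ROUND_UP(12, 5) = 3 in their coarser 0.5 us units, i.e. 1.5 us.
 */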
static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}

static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	if (!intel_crtc_active(crtc))
		return;

	p->active = true;
	p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
	p->cur.bytes_per_pixel = 4;
	p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
	p->cur.horiz_pixels = intel_crtc->cursor_width;
	/* TODO: for now, assume primary and cursor planes are always enabled. */
	p->pri.enabled = true;
	p->cur.enabled = true;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe) {
			p->spr = intel_plane->wm;
			break;
		}
	}
}

static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *intel_crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	pipe_wm->pipe_enabled = params->active;
	pipe_wm->sprites_enabled = params->spr.enabled;
	pipe_wm->sprites_scaled = params->spr.scaled;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
		return false;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level wm = {};

		ilk_compute_wm_level(dev_priv, level, params, &wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, &wm))
			break;

		pipe_wm->wm[level] = wm;
	}

	return true;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		return;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
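/*
 * Restating the mapping above: on platforms with a fifth level (WM4), an
 * enabled wm[4] maps the three LP registers LP1/LP2/LP3 to levels 1, 3
 * and 4; otherwise they map straight to levels 1, 2 and 3.
 */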
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}
static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   struct drm_crtc *for_crtc,
				   const struct intel_wm_config *config,
				   const struct skl_pipe_wm_parameters *params,
				   struct skl_ddb_entry *alloc /* out */)
{
	struct drm_crtc *crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (!params->active) {
		alloc->start = 0;
		alloc->end = 0;
		return;
	}

	ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	nth_active_pipe = 0;
	for_each_crtc(dev, crtc) {
		if (!intel_crtc_active(crtc))
			continue;

		if (crtc == for_crtc)
			break;

		nth_active_pipe++;
	}

	pipe_size = ddb_size / config->num_pipes_active;
	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
	alloc->end = alloc->start + pipe_size - 1;
}
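/*
 * Example from the constants above: with two active pipes, each gets
 * (896 - 4) / 2 = 446 blocks; the second active pipe's slice starts at
 * block 1 * 892 / 2 = 446 and ends at block 891.
 */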
static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
{
	if (config->num_pipes_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
}

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_device *dev = dev_priv->dev;
	enum pipe pipe;
	int plane;
	u32 val;

	for_each_pipe(dev_priv, pipe) {
		for_each_plane(pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
	}
}
static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
{
	return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
				 const struct skl_pipe_wm_parameters *params)
{
	unsigned int total_data_rate = 0;
	int plane;

	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		total_data_rate += skl_plane_relative_data_rate(p);
	}

	return total_data_rate;
}
static void
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
		      const struct intel_wm_config *config,
		      const struct skl_pipe_wm_parameters *params,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry alloc;
	uint16_t alloc_size, start, cursor_blocks;
	unsigned int total_data_rate;
	int plane;

	skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, &alloc);
	alloc_size = skl_ddb_entry_size(&alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
		return;
	}

	cursor_blocks = skl_cursor_allocation(config);
	ddb->cursor[pipe].start = alloc.end - cursor_blocks + 1;
	ddb->cursor[pipe].end = alloc.end;

	alloc_size -= cursor_blocks;
	alloc.end -= cursor_blocks;

	/*
	 * Each active plane get a portion of the remaining space, in
	 * proportion to the amount of data they need to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);

	start = alloc.start;
	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;
		unsigned int data_rate;
		uint16_t plane_blocks;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		data_rate = skl_plane_relative_data_rate(p);

		/*
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
				       total_data_rate);

		ddb->plane[pipe][plane].start = start;
		ddb->plane[pipe][plane].end = start + plane_blocks - 1;

		start += plane_blocks;
	}
}
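/*
 * Example of the proportional split above (illustrative): a single active
 * pipe has 892 - 32 = 860 blocks left after the cursor allocation. Two
 * enabled planes, one 1920x1080@4bpp and one 1920x1080@2bpp, have a 2:1
 * data-rate ratio, so they receive 860 * 2 / 3 = 573 and 860 * 1 / 3 = 286
 * blocks; the truncating division leaves one block unused, which is the
 * FIXME noted in the function.
 */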
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and bytes_per_pixel should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}

static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret, plane_bytes_per_line, wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
		plane_bytes_per_line;

	return ret;
}
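/*
 * Worked example (illustrative numbers): with pixel_rate = 337500 kHz,
 * 4 bytes/pixel and latency = 5 us, method 1 gives
 * DIV_ROUND_UP(5 * 337500 * 4, 1000) = 6750 bytes fetched during the
 * latency window. Method 2 with htotal = 3200 and 3840 active pixels
 * charges whole lines: DIV_ROUND_UP(5 * 337500, 3200 * 1000) = 1 line of
 * 3840 * 4 = 15360 bytes.
 */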
static void skl_compute_transition_wm(struct drm_crtc *crtc,
				      struct skl_pipe_wm_parameters *params,
				      struct skl_pipe_wm *pipe_wm)
{
	/*
	 * For now it is suggested to use the LP0 wm val of corresponding
	 * plane as transition wm val. This is done while computing results.
	 */
	if (!params->active)
		return;
}

static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
	if (!intel_crtc_active(crtc))
		return 0;

	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
}

static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
				       const struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	enum pipe pipe = intel_crtc->pipe;

	if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
		   sizeof(new_ddb->plane[pipe])))
		return true;

	if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
		   sizeof(new_ddb->cursor[pipe])))
		return true;

	return false;
}
static void skl_compute_wm_global_parameters(struct drm_device *dev,
					     struct intel_wm_config *config)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		config->num_pipes_active += intel_crtc_active(crtc);

	/* FIXME: I don't think we need those two global parameters on SKL */
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		config->sprites_enabled |= intel_plane->wm.enabled;
		config->sprites_scaled |= intel_plane->wm.scaled;
	}
}

static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
					   struct skl_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;
	int i = 1; /* Index for sprite planes start */

	p->active = intel_crtc_active(crtc);
	if (p->active) {
		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
		p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);

		/*
		 * For now, assume primary and cursor planes are always enabled.
		 */
		p->plane[0].enabled = true;
		p->plane[0].bytes_per_pixel =
			crtc->primary->fb->bits_per_pixel / 8;
		p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
		p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;

		p->cursor.enabled = true;
		p->cursor.bytes_per_pixel = 4;
		p->cursor.horiz_pixels = intel_crtc->cursor_width ?
					 intel_crtc->cursor_width : 64;
	}

	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe)
			p->plane[i++] = intel_plane->wm;
	}
}
static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
				 struct intel_plane_wm_parameters *p_params,
				 uint16_t max_page_buff_alloc,
				 uint32_t mem_value,
				 uint16_t *res_blocks, /* out */
				 uint8_t *res_lines /* out */)
{
	uint32_t method1, method2, plane_bytes_per_line;
	uint32_t result_bytes;

	if (mem_value == 0 || !p->active || !p_params->enabled)
		return false;

	method1 = skl_wm_method1(p->pixel_rate,
				 p_params->bytes_per_pixel,
				 mem_value);
	method2 = skl_wm_method2(p->pixel_rate,
				 p->pipe_htotal,
				 p_params->horiz_pixels,
				 p_params->bytes_per_pixel,
				 mem_value);

	plane_bytes_per_line = p_params->horiz_pixels *
					p_params->bytes_per_pixel;

	/* For now xtile and linear */
	if (((max_page_buff_alloc * 512) / plane_bytes_per_line) >= 1)
		result_bytes = min(method1, method2);
	else
		result_bytes = method1;

	*res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
	*res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);

	return true;
}
static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
				 struct skl_ddb_allocation *ddb,
				 struct skl_pipe_wm_parameters *p,
				 enum pipe pipe,
				 int level,
				 int num_planes,
				 struct skl_wm_level *result)
{
	uint16_t latency = dev_priv->wm.skl_latency[level];
	uint16_t ddb_blocks;
	int i;

	for (i = 0; i < num_planes; i++) {
		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
						ddb_blocks,
						latency,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}

	ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
	result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
						 latency, &result->cursor_res_b,
						 &result->cursor_res_l);
}

static void skl_compute_pipe_wm(struct drm_crtc *crtc,
				struct skl_ddb_allocation *ddb,
				struct skl_pipe_wm_parameters *params,
				struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
				     level, intel_num_planes(intel_crtc),
				     &pipe_wm->wm[level]);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);

	skl_compute_transition_wm(crtc, params, pipe_wm);
}
static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm_parameters *p,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		uint16_t ddb_blocks;
		uint32_t temp;
		int i;

		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			ddb_blocks = skl_ddb_entry_size(&r->ddb.plane[pipe][i]);

			if ((p_wm->wm[level].plane_res_b[i] > ddb_blocks) ||
			    (p_wm->wm[level].plane_res_l[i] > 31))
				p_wm->wm[level].plane_en[i] = false;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
			/* Use the LP0 WM value for transition WM for now. */
			if (level == 0)
				r->plane_trans[pipe][i] = temp;
		}

		temp = 0;

		ddb_blocks = skl_ddb_entry_size(&r->ddb.cursor[pipe]);

		if ((p_wm->wm[level].cursor_res_b > ddb_blocks) ||
		    (p_wm->wm[level].cursor_res_l > 31))
			p_wm->wm[level].cursor_en = false;

		temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].cursor_res_b;

		if (p_wm->wm[level].cursor_en)
			temp |= PLANE_WM_EN;

		r->cursor[pipe][level] = temp;
		/* Use the LP0 WM value for transition WM for now. */
		if (level == 0)
			r->cursor_trans[pipe] = temp;
	}

	r->wm_linetime[pipe] = p_wm->linetime;
}
static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if (new->dirty[pipe]) {
			I915_WRITE(PIPE_WM_LINETIME(pipe),
				   new->wm_linetime[pipe]);

			for (level = 0; level <= max_level; level++) {
				for (i = 0; i < intel_num_planes(crtc); i++)
					I915_WRITE(PLANE_WM(pipe, i, level),
						   new->plane[pipe][i][level]);
				I915_WRITE(CUR_WM(pipe, level),
					   new->cursor[pipe][level]);
			}
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM_TRANS(pipe, i),
					   new->plane_trans[pipe][i]);
			I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);

			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_BUF_CFG(pipe, i),
					   new->ddb.plane[pipe][i].end << 16 |
					   new->ddb.plane[pipe][i].start);

			I915_WRITE(CUR_BUF_CFG(pipe),
				   new->ddb.cursor[pipe].end << 16 |
				   new->ddb.cursor[pipe].start);
		}
	}
}
static bool skl_update_pipe_wm(struct drm_crtc *crtc,
			       struct skl_pipe_wm_parameters *params,
			       struct intel_wm_config *config,
			       struct skl_ddb_allocation *ddb, /* out */
			       struct skl_pipe_wm *pipe_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	skl_compute_wm_pipe_parameters(crtc, params);
	skl_allocate_pipe_ddb(crtc, config, params, ddb);
	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);

	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.skl_active = *pipe_wm;
	return true;
}

static void skl_update_other_pipe_wm(struct drm_device *dev,
				     struct drm_crtc *crtc,
				     struct intel_wm_config *config,
				     struct skl_wm_values *r)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc *this_crtc = to_intel_crtc(crtc);

	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
	 * crtc we are currently computing the new WM values for), other
	 * enabled crtcs will keep the same allocation and we don't need to
	 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
	 * other active pipes need new DDB allocation and WM values.
	 */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		struct skl_pipe_wm_parameters params = {};
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&params, config,
						&r->ddb, &pipe_wm);

		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values to
		 * be different.
		 */
		WARN_ON(!wm_changed);

		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
		r->dirty[intel_crtc->pipe] = true;
	}
}
static void skl_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_pipe_wm_parameters params = {};
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct skl_pipe_wm pipe_wm = {};
	struct intel_wm_config config = {};

	memset(results, 0, sizeof(*results));

	skl_compute_wm_global_parameters(dev, &config);

	if (!skl_update_pipe_wm(crtc, &params, &config,
				&results->ddb, &pipe_wm))
		return;

	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
	results->dirty[intel_crtc->pipe] = true;

	skl_update_other_pipe_wm(dev, crtc, &config, results);
	skl_write_wm_values(dev_priv, results);

	/* store the new configuration */
	dev_priv->wm.skl_hw = *results;
}

static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	skl_update_wm(crtc);
}
static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void
ilk_update_sprite_wm(struct drm_plane *plane,
		     struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}
static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].cursor_en = is_enabled;
			active->wm[level].cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.cursor_en = is_enabled;
			active->trans_wm.cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}
static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
	enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);

	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++)
			hw->plane[pipe][i][level] =
					I915_READ(PLANE_WM(pipe, i, level));
		hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
	hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));

	if (!intel_crtc_active(crtc))
		return;

	hw->dirty[pipe] = true;

	active->linetime = hw->wm_linetime[pipe];

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						 false, i, level);
		}
		temp = hw->cursor[pipe][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = hw->plane_trans[pipe][i];
		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
	}

	temp = hw->cursor_trans[pipe];
	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		skl_pipe_wm_get_hw_state(crtc);
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	active->pipe_enabled = intel_crtc_active(crtc);

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
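/*
 * Worked instance of the SR formula documented above (assumed mode and
 * latency, purely illustrative): a 1024x768@60 mode has
 * line time = htotal / dotclock = 1344 / 65000 kHz ~= 20.7 us. With an
 * assumed 30 us latency and 4 bytes per pixel,
 * (trunc(30 / 20.7) + 1) * 1024 * 4 = 8192 bytes must fit in the FIFO
 * before self-refresh can be entered.
 */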
void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width,
				    uint32_t sprite_height,
				    int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc,
						   sprite_width, sprite_height,
						   pixel_size, enabled, scaled);
}
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_ggtt_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}
/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	limits = dev_priv->rps.max_freq_softlimit << 24;
	if (val <= dev_priv->rps.min_freq_softlimit)
		limits |= dev_priv->rps.min_freq_softlimit << 16;

	return limits;
}
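/*
 * Layout sketch for the value computed above (hypothetical numbers, not
 * from any spec): the max softlimit lives in bits 31:24 and the min
 * softlimit, when requested, in bits 23:16.  With a softlimit range of
 * 0x0c..0x16, asking for val == 0x0c yields
 * (0x16 << 24) | (0x0c << 16) == 0x160c0000.
 */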
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val == dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val == dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		I915_WRITE(GEN6_RP_UP_EI, 12500);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);

		/* Downclock if less than 85% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		I915_WRITE(GEN6_RP_UP_EI, 10250);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);

		/* Downclock if less than 75% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		I915_WRITE(GEN6_RP_UP_EI, 8000);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);

		/* Downclock if less than 60% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;
	}

	dev_priv->rps.power = new_power;
	dev_priv->rps.last_adj = 0;
}
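/*
 * Worked example for the 1280ns units noted above: in LOW_POWER mode the
 * up evaluation interval of 12500 units is 12500 * 1280ns = 16ms, and the
 * up threshold of 11800 units is 11800 / 12500 = 94.4% busyness, which is
 * what the "more than 95% busy over 16ms" comment approximates.
 */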
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_THRESHOLD;

	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
	mask &= dev_priv->pm_rps_events;

	/* IVB and SNB hard hangs on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 */
	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
		mask |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (IS_GEN8(dev_priv->dev))
		mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return ~mask;
}
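/*
 * Note on the inversion above: GEN6_PMINTRMSK is a disable mask, where a
 * set bit silences the corresponding PM event.  The function accumulates
 * the events that should stay live in 'mask' and returns ~mask, so callers
 * can write the result into the register directly.
 */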
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 *
 * If Gfx is idle, then:
 * 1. Mask Turbo interrupts
 * 2. Bring up Gfx clock
 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 * 4. Clear the Force GFX CLK ON bit so that Gfx can go down
 * 5. Unmask Turbo interrupts
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* Latest VLV doesn't need to force the gfx clock */
	if (dev->pdev->revision >= 0xd) {
		valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		return;
	}

	/*
	 * When we are idle, drop to the minimum voltage state.
	 */
	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
		return;

	/* Mask turbo interrupts so that they will not come in between */
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);

	vlv_force_gfx_clock(dev_priv, true);

	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;

	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
			dev_priv->rps.min_freq_softlimit);

	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
		      & GENFREQSTATUS) == 0, 5))
		DRM_ERROR("timed out waiting for Punit\n");

	vlv_force_gfx_clock(dev_priv, false);

	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_CHERRYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		else if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	if (val != dev_priv->rps.cur_freq) {
		DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
				 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
				 dev_priv->rps.cur_freq,
				 vlv_gpu_freq(dev_priv, val), val);

		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
	}

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
static void gen8_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
				   ~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
	 * gen8_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
}

static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
		   ~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);

	if (IS_BROADWELL(dev))
		gen8_disable_rps_interrupts(dev);
	else
		gen6_disable_rps_interrupts(dev);
}

static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen8_disable_rps_interrupts(dev);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We're doing forcewake before disabling RC6;
	 * this is what the BIOS expects when going into suspend. */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	gen6_disable_rps_interrupts(dev);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
	else
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake */
	if (INTEL_INFO(dev)->gen < 5)
		return 0;

	/* RC6 is only on Ironlake mobile not on desktop */
	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
		return 0;

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	/* Disable RC6 on Ironlake */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}
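/*
 * Example of the mask arithmetic above (hypothetical request): on a
 * platform where only plain RC6 is valid (mask == INTEL_RC6_ENABLE),
 * booting with i915.enable_rc6=7 gives (7 & 1) != 7, so the debug
 * message fires and the sanitized value 1 (RC6 only) is returned.
 */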
int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}
static void gen8_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
{
	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq		= 0;
	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
	/* XXX: only BYT has a special efficient freq */
	dev_priv->rps.efficient_freq	= dev_priv->rps.rp1_freq;
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
}
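/*
 * Decode sketch with a made-up register value: rp_state_cap == 0x00040b16
 * gives rp0 = 0x16 (bits 7:0), rp1 = 0x0b (bits 15:8) and min_freq = 0x04
 * (bits 23:16).  In 50MHz units that is RP0 = 1100 MHz, RP1 = 550 MHz and
 * RPn = 200 MHz.
 */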
static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0, rp_state_cap;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);

	gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 rp_state_cap;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);

	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	gen6_enable_rps_interrupts(dev);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
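/*
 * Worked example for the pre-Haswell branch above (illustrative numbers
 * only): with max_ia_freq = 3400 MHz and a gpu_freq 5 bins below the max
 * softlimit, ia_freq = 3400 - (5 * 180) / 2 = 2950, which
 * DIV_ROUND_CLOSEST(2950, 100) turns into 30, i.e. a 3.0 GHz reference
 * in the 100 MHz units used by the mailbox write above.
 */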
void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
	rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;

	return rp1;
}

static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpn;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
	rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;

	return rpn;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
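/*
 * Sketch of the RPe fuse assembly in valleyview_rps_rpe_freq() above, with
 * hypothetical fuse field values: the low bits come from the FMAX_FUSE_LO
 * field and the high bits from FMAX_FUSE_HI shifted left by 5, so a LO
 * field of 0x0b combined with a HI field of 0x3 gives
 * 0x0b | (0x3 << 5) == 0x6b.
 */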
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pctx_paddr, paddr;
	struct i915_gtt *gtt = &dev_priv->gtt;
	u32 pcbr;
	int pctx_size = 32*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		paddr = (dev_priv->mm.stolen_base +
			 (gtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}
}
static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory.  For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		return;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	dev_priv->vlv_pctx = pctx;
}
static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	cherryview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = 200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = 267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = 333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = 320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = 400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR  */
	pcbr = I915_READ(VLV_PCBR);

	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen6_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
	}
}
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}
static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx == NULL)
		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.renderctx)
		return -ENOMEM;

	if (dev_priv->ips.pwrctx == NULL)
		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}
static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		dev_priv->mm.interruptible = was_interruptible;
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);

	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
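/*
 * Worked example with a made-up vidfreq encoding: div == 24, post == 1 and
 * pre == 2 give freq = (24 * 133333) / ((1 << 1) * 2) = 3199992 / 4
 * = 799998, i.e. roughly 800 MHz when the 133333 base is read as kHz.
 */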
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
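/*
 * Worked example of the slope/intercept formula above (hypothetical
 * register contents): with a slope m == 100 from TSFS, a TR1 reading
 * x == 90 and an intercept b == 20, the result is
 * ((100 * 90) / 127) - 20 = 70 - 20 = 50.
 */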
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* the 128-entry PXVID -> voltage table is elided in this copy */
	};

	if (INTEL_INFO(dev)->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *ring;
	bool ret = false;
	int i;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
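/*
 * Worked example for the P-state weight computation above (illustrative
 * values only): vid == 100 and freq == 400000 (kHz) give
 * val = 100 * 100 * (400000 / 1000) * 255 / (127 * 127 * 900)
 *     = 1020000000 / 14516100 ~= 70, comfortably below the 0xff limit.
 */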
void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}
/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	cancel_work_sync(&dev_priv->rps.work);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}
void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);
		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}
void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

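/*
 * The wakeref taken via intel_runtime_pm_get_noresume() above is the
 * one released at the end of intel_gen6_powersave_work(); if
 * schedule_delayed_work() returns false the work was already pending
 * and already owns a reference, so no second one is taken.
 */
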
void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->rps.enabled = false;
	intel_enable_gt_powersave(dev);
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_primary_plane(dev_priv, pipe);
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}

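/*
 * Note on the _MASKED_BIT_*() helpers used throughout these routines:
 * "masked" registers on these GPUs carry a write-enable mask in bits
 * 31:16, so _MASKED_BIT_ENABLE(bit) expands to (bit << 16) | bit and
 * _MASKED_BIT_DISABLE(bit) to just (bit << 16). A single write can
 * therefore flip individual bits without a read-modify-write cycle,
 * e.g.:
 *
 *	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 */
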
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch:snb */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGunitClockGating:chv (pre-production hw) */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
		   GINT_DIS);

	/* WaDisableFfDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

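/*
 * The init_clock_gating hook dispatched above is selected per-platform
 * in intel_init_pm() below; keeping the dispatch behind this single
 * entry point lets the display init and resume paths stay
 * platform-agnostic.
 */
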
void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
		return;
	}

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = gen7_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = ironlake_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
		dev_priv->display.enable_fbc = g4x_enable_fbc;
		dev_priv->display.disable_fbc = g4x_disable_fbc;
	} else {
		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
		dev_priv->display.enable_fbc = i8xx_enable_fbc;
		dev_priv->display.disable_fbc = i8xx_disable_fbc;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}

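/*
 * Callers elsewhere in the driver reach FBC only through the
 * dev_priv->display hooks installed above, so the per-platform
 * selection made here stays transparent to them. Note that the
 * FBC_CONTROL interval write applies only to the legacy i8xx path.
 */
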
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_init_fbc(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		dev_priv->display.init_clock_gating = gen9_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	if (INTEL_INFO(dev_priv)->gen >= 9)
		I915_WRITE(GEN9_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

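/*
 * Usage sketch (hypothetical caller, values for illustration only):
 * the pcode mailbox is a request/response interface and both helpers
 * expect rps.hw_lock to be held, so a read typically looks like:
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val) == 0)
 *		DRM_DEBUG_DRIVER("oc params: 0x%08x\n", val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * Note that sandybridge_pcode_read() writes *val out as the request
 * word before overwriting it with the response.
 */
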
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}

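/*
 * Worked example (using the 1333 MHz case above, div = 16, with a
 * hypothetical opcode purely for illustration): opcode 0xd0 decodes to
 * DIV_ROUND_CLOSEST(1333 * (0xd0 + 6 - 0xbd), 64) =
 * DIV_ROUND_CLOSEST(1333 * 25, 64) = 521 MHz; byt_freq_opcode() below
 * is the inverse mapping from MHz back to the opcode.
 */
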
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, freq;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		div = 5;
		break;
	case 267:
		div = 6;
		break;
	case 320:
	case 333:
	case 400:
		div = 8;
		break;
	default:
		return -1;
	}

	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);

	return freq;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, opcode;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		mul = 5;
		break;
	case 267:
		mul = 6;
		break;
	case 320:
	case 333:
	case 400:
		mul = 8;
		break;
	default:
		return -1;
	}

	/* CHV needs even values */
	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);

	return opcode;
}

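/*
 * Worked example (using the 320 MHz case above, mul = div = 8, with a
 * hypothetical frequency purely for illustration): 500 MHz encodes to
 * DIV_ROUND_CLOSEST(500 * 2 * 8, 320) * 2 = 25 * 2 = 50, and
 * chv_gpu_freq() maps 50 back to
 * DIV_ROUND_CLOSEST(320 * 50, 2 * 8) / 2 = 1000 / 2 = 500 MHz, so the
 * two round-trip on the even opcodes CHV requires.
 */
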
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}

int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}

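/*
 * Usage sketch (hypothetical debug output): RPS frequencies are stored
 * in opcode form throughout dev_priv->rps, so both directions get used
 * when reporting to logs or userspace, e.g.:
 *
 *	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
 *			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
 *			 dev_priv->rps.max_freq);
 */
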
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
}