/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/*
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more power,
 * but require a higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
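/*
 * As an illustration (the values here are hypothetical; the real mask is
 * computed per-platform elsewhere in the driver): a platform that supports
 * plain RC6 and deep RC6 but not RC6pp would use a mask of
 *
 *	INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE
 *
 * i.e. 0x3, leaving the RC6pp bit clear.
 */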
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */
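/*
 * For example, booting with i915.enable_fbc=1 requests FBC, =0 disables it,
 * and a negative value leaves the per-chip default in place (see the
 * FBC_CHIP_DEFAULT / FBC_MODULE_PARAM checks in intel_update_fbc() below).
 */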
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. This parameter has
	 * no impact on other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN8(dev))
		return;

	I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq =  667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
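/*
 * Example lookup (using a row from the table above, and assuming the field
 * order display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable):
 * intel_get_cxsr_latency(1, 1, 667, 667) matches the DDR3-667 entry
 * {1, 1, 667, 667, 6438, 36438, 6911, 36911}, i.e. a display self-refresh
 * latency of 6438 ns (36438 ns with HPLL disabled).
 */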
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
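/*
 * Worked example with a hypothetical DSPARB value of 0x00003030:
 * plane A gets 0x3030 & 0x7f = 48 FIFO entries, the cursor region starts
 * at (0x3030 >> DSPARB_CSTART_SHIFT) & 0x7f = 96 (assuming
 * DSPARB_CSTART_SHIFT == 7), so plane B gets 96 - 48 = 48 entries.
 */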
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point.  If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	return wm_size;
}
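/*
 * Worked example with illustrative numbers: a 148500 kHz pixel clock at
 * 4 bytes per pixel and latency_ns = 5000 drains
 * (148500/1000) * 4 * 5000 / 1000 = 2960 bytes during the latency window,
 * i.e. DIV_ROUND_UP(2960, 64) = 47 cachelines at a 64-byte FIFO line, so a
 * 96-entry FIFO with a guard of 2 would yield a watermark of
 * 96 - (47 + 2) = 47.
 */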
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
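/*
 * Small-buffer sanity check (illustrative numbers): at clock = 148500,
 * pixel_size = 4 and display_latency_ns = 5000, the FIFO drains
 * ((148500 * 4 / 1000) * 5000) / 1000 = 2970 bytes while memory is being
 * fetched; any positive fifo_size*cacheline_size - hdisplay*8 TLB-miss
 * slack is added on top before converting to cachelines.
 */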
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	int entries;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
				       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}
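/*
 * Worked example (illustrative numbers): a clock of 148500 kHz at 4 Bpp
 * gives entries = DIV_ROUND_UP(148500, 1000) * 4 = 596 > 128, so the
 * 64-precision multiplier is chosen and the drain latency becomes
 * (64 * 64 * 4) / 596 = 27.
 */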
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_PLANE_PRECISION_64 :
					   DDL_PLANE_PRECISION_32;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_CURSOR_PRECISION_64 :
					   DDL_CURSOR_PRECISION_32;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}
#define single_plane_enabled(mask) is_power_of_2(mask)

static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_SPRITE_PRECISION_64(sprite) :
					   DDL_SPRITE_PRECISION_32(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
	}

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
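/*
 * For example (hypothetical mode): upscaling a 1280x800 pipe to a
 * 1920x1200 panel-fitter window multiplies the pixel rate by
 * (1280 * 800) / (1920 * 1200), i.e. scales it down to 4/9 of crtc_clock,
 * since the pipe produces fewer pixels than the fitter emits.
 */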
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
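/*
 * Worked comparison (illustrative numbers, latency in 0.1us units): with
 * pixel_rate = 148500, 4 Bpp and latency = 35 (3.5 us),
 * method1 = DIV_ROUND_UP(148500 * 4 * 35, 64 * 10000) + 2 = 35, while
 * method2 charges whole display lines: (35 * 148500) / (2200 * 10000) = 0
 * full lines, so (0 + 1) * 1920 * 4 / 64 + 2 = 122 for a 1920-wide,
 * 2200-htotal mode.
 */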
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
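/*
 * E.g. (illustrative): a primary watermark of 100 on a 1920-pixel-wide,
 * 4 Bpp plane gives DIV_ROUND_UP(100 * 64, 1920 * 4) + 2 = 1 + 2 = 3
 * FBC watermark lines.
 */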
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	/* max that registers can hold */
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The WM are computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
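/*
 * Illustrative sketch (not part of the driver): the linetime above is
 * 8 * htotal / dotclock in 1/8th-us units. For an assumed 1080p mode
 * (htotal 2200, dotclock 148500 kHz) that is roughly 14.8us per row.
 */
static inline u32 example_linetime(void)
{
	u32 htotal = 2200;	/* assumed */
	u32 dotclock = 148500;	/* assumed, kHz */

	/* DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) == 119 */
	return (htotal * 1000 * 8 + dotclock / 2) / dotclock;
}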
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}
int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[5])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/* WM1+ latency values in 0.5us units */
		if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
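/*
 * Illustrative sketch (not part of the driver): the "12" passed above is
 * 1.2us in the 0.1us units used for WM0. WM1+ latencies are stored in
 * 0.5us units, so ilk_increase_wm_latency() converts the same floor with
 * DIV_ROUND_UP(12, 5) = 3, which this helper reproduces.
 */
static inline u16 example_wm1_latency_floor(u16 min_0_1us)
{
	return (min_0_1us + 4) / 5;	/* 0.1us units -> 0.5us units, rounded up */
}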
static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	if (!intel_crtc_active(crtc))
		return;

	p->active = true;
	p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
	p->cur.bytes_per_pixel = 4;
	p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
	p->cur.horiz_pixels = intel_crtc->cursor_width;
	/* TODO: for now, assume primary and cursor planes are always enabled. */
	p->pri.enabled = true;
	p->cur.enabled = true;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe) {
			p->spr = intel_plane->wm;
			break;
		}
	}
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *intel_crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	pipe_wm->pipe_enabled = params->active;
	pipe_wm->sprites_enabled = params->spr.enabled;
	pipe_wm->sprites_scaled = params->spr.scaled;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
		return false;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level wm = {};

		ilk_compute_wm_level(dev_priv, level, params, &wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, &wm))
			break;

		pipe_wm->wm[level] = wm;
	}

	return true;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		return;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
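/*
 * Illustrative sketch (not part of the driver): with the five HSW/BDW
 * watermark levels, the LP1/LP2/LP3 registers map to levels 1/3/4 when
 * level 4 is usable and to 1/2/3 otherwise, exactly as computed above.
 */
static inline int example_wm_lp_to_level(int wm_lp, bool level4_enabled)
{
	/* wm_lp is 1..3; yields 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && level4_enabled);
}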
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
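/*
 * Illustrative sketch (not part of the driver): the dirty mask packs pipes
 * into bits 0..2, linetimes into 8..10, LP1-3 into 16..18 and FBC/DDB into
 * 24/25. A pipe B change plus a DDB repartition therefore dirties:
 */
static inline unsigned int example_wm_dirty_bits(void)
{
	/* both kinds of change force all LP1+ watermarks to be rewritten */
	return WM_DIRTY_PIPE(1) | WM_DIRTY_DDB | WM_DIRTY_LP_ALL;
}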
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need to, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}
static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void
ilk_update_sprite_wm(struct drm_plane *plane,
		     struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_width;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	active->pipe_enabled = intel_crtc_active(crtc);

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
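/*
 * Illustrative sketch (not part of the driver): the SR formula from the
 * comment above with assumed numbers (148500 kHz dotclock, htotal 2200,
 * hdisplay 1920, 4 bytes per pixel, 12us latency). The result would still
 * be rounded up and padded by 2 entries for clock crossings.
 */
static inline u32 example_sr_watermark(void)
{
	u32 dotclock = 148500;			/* assumed, kHz */
	u32 htotal = 2200, hdisplay = 1920;	/* assumed mode */
	u32 cpp = 4, latency_ns = 12000;	/* assumed plane + latency */

	/* line time = htotal / dotclock ~= 14815 ns for this mode */
	u32 line_time_ns = htotal * 10000 / (dotclock / 100);

	/* (trunc(latency / line time) + 1) * surface width * cpp = 7680 */
	return (latency_ns / line_time_ns + 1) * hdisplay * cpp;
}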
void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width,
				    uint32_t sprite_height,
				    int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc,
						   sprite_width, sprite_height,
						   pixel_size, enabled, scaled);
}
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_ggtt_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}
/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	limits = dev_priv->rps.max_freq_softlimit << 24;
	if (val <= dev_priv->rps.min_freq_softlimit)
		limits |= dev_priv->rps.min_freq_softlimit << 16;

	return limits;
}
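/*
 * Illustrative sketch (not part of the driver): GEN6_RP_INTERRUPT_LIMITS
 * carries the max softlimit in bits 31:24 and, only once the floor is
 * reached, the min softlimit in bits 23:16, both in 50MHz units. An
 * assumed 300MHz-1100MHz range at the floor packs as below.
 */
static inline u32 example_rps_limits(void)
{
	u8 max_soft = 22, min_soft = 6;	/* assumed 1100/300 MHz softlimits */

	return ((u32)max_soft << 24) | ((u32)min_soft << 16);
}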
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;

	if (dev_priv->rps.is_bdw_sw_turbo)
		return;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val == dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val == dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		I915_WRITE(GEN6_RP_UP_EI, 12500);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);

		/* Downclock if less than 85% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		I915_WRITE(GEN6_RP_UP_EI, 10250);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);

		/* Downclock if less than 75% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		I915_WRITE(GEN6_RP_UP_EI, 8000);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);

		/* Downclock if less than 60% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;
	}

	dev_priv->rps.power = new_power;
	dev_priv->rps.last_adj = 0;
}
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_THRESHOLD;

	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
	mask &= dev_priv->pm_rps_events;

	/* IVB and SNB hard hangs on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 */
	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
		mask |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (IS_GEN8(dev_priv->dev))
		mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return ~mask;
}
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}
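/*
 * Illustrative sketch (not part of the driver): on these parts the "val"
 * above is a ratio in 50MHz units, which is why the tracepoint logs
 * val * 50. An assumed request of val = 18 thus asks the PCU for 900MHz.
 */
static inline u32 example_gen6_freq_mhz(u8 val)
{
	return (u32)val * 50;	/* GT frequency in MHz */
}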
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Mask Turbo interrupts
 * 2. Bring up Gfx clock
 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 * 4. Clear the Force GFX CLK ON bit so that Gfx can power down
 * 5. Unmask Turbo interrupts
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* Latest VLV doesn't need to force the gfx clock */
	if (dev->pdev->revision >= 0xd) {
		valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		return;
	}

	/*
	 * When we are idle, drop to min voltage state.
	 */

	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
		return;

	/* Mask turbo interrupt so that they will not come in between */
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);

	vlv_force_gfx_clock(dev_priv, true);

	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;

	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
			dev_priv->rps.min_freq_softlimit);

	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
		      & GENFREQSTATUS) == 0, 5))
		DRM_ERROR("timed out waiting for Punit\n");

	vlv_force_gfx_clock(dev_priv, false);

	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_CHERRYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		else if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else if (!dev_priv->rps.is_bdw_sw_turbo
			 || atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		}

		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		else if (!dev_priv->rps.is_bdw_sw_turbo
			 || atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		}

		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq,
			 vlv_gpu_freq(dev_priv, val), val);

	if (val != dev_priv->rps.cur_freq)
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
static void gen8_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_BROADWELL(dev) && dev_priv->rps.is_bdw_sw_turbo){
		if (atomic_read(&dev_priv->rps.sw_turbo.flip_received))
			del_timer(&dev_priv->rps.sw_turbo.flip_timer);
		dev_priv->rps.is_bdw_sw_turbo = false;
	} else {
		I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
		I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
			   ~dev_priv->pm_rps_events);
		/* Complete PM interrupt masking here doesn't race with the rps work
		 * item again unmasking PM interrupts because that is using a different
		 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
		 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
		 * gen8_enable_rps will clean up. */

		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir = 0;
		spin_unlock_irq(&dev_priv->irq_lock);

		I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
	}
}
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
		   ~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}
static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);

	if (IS_BROADWELL(dev))
		gen8_disable_rps_interrupts(dev);
	else
		gen6_disable_rps_interrupts(dev);
}
static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen8_disable_rps_interrupts(dev);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* we're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	gen6_disable_rps_interrupts(dev);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
		      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
		      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake */
	if (INTEL_INFO(dev)->gen < 5)
		return 0;

	/* RC6 is only on Ironlake mobile not on desktop */
	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
		return 0;

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	/* Disable RC6 on Ironlake */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}
int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}
static void gen8_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
{
	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq		= 0;
	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
	/* XXX: only BYT has a special efficient freq */
	dev_priv->rps.efficient_freq	= dev_priv->rps.rp1_freq;
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
}
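/*
 * Illustrative sketch (not part of the driver): decoding an assumed
 * RP_STATE_CAP value with the shifts used above. 0x16/0x0b/0x04 are
 * hypothetical field values, all in 50MHz units.
 */
static inline void example_parse_rp_state_cap(u8 *rp0, u8 *rp1, u8 *rpn)
{
	u32 rp_state_cap = 0x00040b16;		/* assumed register value */

	*rp0 = (rp_state_cap >>  0) & 0xff;	/* 0x16 -> 22 * 50 = 1100 MHz */
	*rp1 = (rp_state_cap >>  8) & 0xff;	/* 0x0b -> 11 * 50 =  550 MHz */
	*rpn = (rp_state_cap >> 16) & 0xff;	/* 0x04 ->  4 * 50 =  200 MHz */
}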
static void bdw_sw_calculate_freq(struct drm_device *dev,
		struct intel_rps_bdw_cal *c, u32 *cur_time, u32 *c0)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 busy = 0;
	u32 busyness_pct = 0;
	u32 elapsed_time = 0;
	u16 new_freq = 0;

	if (!c || !cur_time || !c0)
		return;

	if (0 == c->last_c0)
		goto out;

	/* Check Evaluation interval */
	elapsed_time = *cur_time - c->last_ts;
	if (elapsed_time < c->eval_interval)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	/*
	 * c0 unit in 32*1.28 usec, elapsed_time unit in 1 usec.
	 * Whole busyness_pct calculation should be
	 *     busy = ((u64)(*c0 - c->last_c0) << 5 << 7) / 100;
	 *     busyness_pct = (u32)(busy * 100 / elapsed_time);
	 * The final formula is to simplify CPU calculation
	 */
	busy = (u64)(*c0 - c->last_c0) << 12;
	do_div(busy, elapsed_time);
	busyness_pct = (u32)busy;

	if (c->is_up && busyness_pct >= c->it_threshold_pct)
		new_freq = (u16)dev_priv->rps.cur_freq + 3;
	if (!c->is_up && busyness_pct <= c->it_threshold_pct)
		new_freq = (u16)dev_priv->rps.cur_freq - 1;

	/* Adjust to new frequency busyness and compare with threshold */
	if (0 != new_freq) {
		if (new_freq > dev_priv->rps.max_freq_softlimit)
			new_freq = dev_priv->rps.max_freq_softlimit;
		else if (new_freq < dev_priv->rps.min_freq_softlimit)
			new_freq = dev_priv->rps.min_freq_softlimit;

		gen6_set_rps(dev, new_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	c->last_c0 = *c0;
	c->last_ts = *cur_time;
}
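/*
 * Illustrative sketch (not part of the driver): why the "<< 12" above
 * matches the two-step formula in the comment. One c0 tick is
 * 32 * 1.28us = (2^5 * 2^7) / 100 us, so
 *     busyness_pct = ((delta_c0 << 5 << 7) / 100) * 100 / elapsed_us
 *                  = (delta_c0 << 12) / elapsed_us
 * and the percent factor cancels the tick scaling.
 */
static inline u32 example_bdw_busyness_pct(u32 delta_c0, u32 elapsed_us)
{
	return elapsed_us ? (u32)(((u64)delta_c0 << 12) / elapsed_us) : 0;
}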
static void gen8_set_frequency_RP0(struct work_struct *work)
{
	struct intel_rps_bdw_turbo *p_bdw_turbo =
		container_of(work, struct intel_rps_bdw_turbo, work_max_freq);
	struct intel_gen6_power_mgmt *p_power_mgmt =
		container_of(p_bdw_turbo, struct intel_gen6_power_mgmt, sw_turbo);
	struct drm_i915_private *dev_priv =
		container_of(p_power_mgmt, struct drm_i915_private, rps);

	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_set_rps(dev_priv->dev, dev_priv->rps.rp0_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void flip_active_timeout_handler(unsigned long var)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) var;

	del_timer(&dev_priv->rps.sw_turbo.flip_timer);
	atomic_set(&dev_priv->rps.sw_turbo.flip_received, false);

	queue_work(dev_priv->wq, &dev_priv->rps.sw_turbo.work_max_freq);
}
void bdw_software_turbo(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	u32 current_time = I915_READ(TIMESTAMP_CTR); /* unit in usec */
	u32 current_c0 = I915_READ(MCHBAR_PCU_C0); /* unit in 32*1.28 usec */

	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.up,
			      &current_time, &current_c0);
	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.down,
			      &current_time, &current_c0);
}
3761 static void gen8_enable_rps(struct drm_device
*dev
)
3763 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3764 struct intel_engine_cs
*ring
;
3765 uint32_t rc6_mask
= 0, rp_state_cap
;
3766 uint32_t threshold_up_pct
, threshold_down_pct
;
3767 uint32_t ei_up
, ei_down
; /* up and down evaluation interval */
3771 /* Use software Turbo for BDW */
3772 dev_priv
->rps
.is_bdw_sw_turbo
= IS_BROADWELL(dev
);
3774 /* 1a: Software RC state - RC0 */
3775 I915_WRITE(GEN6_RC_STATE
, 0);
	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	ei_up = 84480; /* 84.48ms */
	ei_down = 448000; /* 448ms */

	threshold_up_pct = 90; /* x percent busy */
	threshold_down_pct = 70;

	if (dev_priv->rps.is_bdw_sw_turbo) {
		dev_priv->rps.sw_turbo.up.it_threshold_pct = threshold_up_pct;
		dev_priv->rps.sw_turbo.up.eval_interval = ei_up;
		dev_priv->rps.sw_turbo.up.is_up = true;
		dev_priv->rps.sw_turbo.up.last_ts = 0;
		dev_priv->rps.sw_turbo.up.last_c0 = 0;

		dev_priv->rps.sw_turbo.down.it_threshold_pct = threshold_down_pct;
		dev_priv->rps.sw_turbo.down.eval_interval = ei_down;
		dev_priv->rps.sw_turbo.down.is_up = false;
		dev_priv->rps.sw_turbo.down.last_ts = 0;
		dev_priv->rps.sw_turbo.down.last_c0 = 0;

		/* Start the timer to track if flip comes */
		dev_priv->rps.sw_turbo.timeout = 200*1000; /* in us */

		init_timer(&dev_priv->rps.sw_turbo.flip_timer);
		dev_priv->rps.sw_turbo.flip_timer.function = flip_active_timeout_handler;
		dev_priv->rps.sw_turbo.flip_timer.data = (unsigned long) dev_priv;
		dev_priv->rps.sw_turbo.flip_timer.expires =
			usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
		INIT_WORK(&dev_priv->rps.sw_turbo.work_max_freq, gen8_set_frequency_RP0);

		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
	} else {
		/* NB: Docs say 1s, and 1000000 - which aren't equivalent
		 * 1 second timeout */
		I915_WRITE(GEN6_RP_DOWN_TIMEOUT, FREQ_1_28_US(1000000));

		/* Docs recommend 900MHz, and 300 MHz respectively */
		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
			   dev_priv->rps.max_freq_softlimit << 24 |
			   dev_priv->rps.min_freq_softlimit << 16);

		I915_WRITE(GEN6_RP_UP_THRESHOLD,
			   FREQ_1_28_US(ei_up * threshold_up_pct / 100));
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
			   FREQ_1_28_US(ei_down * threshold_down_pct / 100));
		I915_WRITE(GEN6_RP_UP_EI,
			   FREQ_1_28_US(ei_up));
		I915_WRITE(GEN6_RP_DOWN_EI,
			   FREQ_1_28_US(ei_down));

		I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	}

	/* 5: Enable RPS */
	rp_ctl_flag = GEN6_RP_MEDIA_TURBO |
		      GEN6_RP_MEDIA_HW_NORMAL_MODE |
		      GEN6_RP_MEDIA_IS_GFX |
		      GEN6_RP_UP_BUSY_AVG |
		      GEN6_RP_DOWN_IDLE_AVG;
	if (!dev_priv->rps.is_bdw_sw_turbo)
		rp_ctl_flag |= GEN6_RP_ENABLE;

	I915_WRITE(GEN6_RP_CONTROL, rp_ctl_flag);

	/* 6: Ring frequency + overclocking
	 * (our driver does this later) */
	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
	if (!dev_priv->rps.is_bdw_sw_turbo)
		gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
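/*
 * Illustrative sketch, not part of the driver: the RP thresholds and
 * evaluation intervals programmed above are in units of 1.28 us. Assuming
 * FREQ_1_28_US(us) expands to ((us) * 100) >> 7 (i.e. us / 1.28, rounded
 * down), the 84.48 ms up interval at 90% busy works out to the same 59400
 * that the Valleyview/Cherryview paths below write directly.
 */
#if 0
#include <stdio.h>

static unsigned int freq_1_28_us(unsigned int us)
{
	return (us * 100) >> 7; /* microseconds -> 1.28 us units */
}

int main(void)
{
	unsigned int ei_up = 84480; /* 84.48 ms, in us */

	/* prints "up threshold = 59400" */
	printf("up threshold = %u\n", freq_1_28_us(ei_up * 90 / 100));
	return 0;
}
#endif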
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 rp_state_cap;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);

	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	gen6_enable_rps_interrupts(dev);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
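/*
 * Illustrative sketch, not part of the driver: the BIOS rc6vids fixup above
 * depends on the VID <-> millivolt mapping behind GEN6_ENCODE_RC6_VID() and
 * GEN6_DECODE_RC6_VID(). Assuming the usual linear encoding
 * (mV = vid * 5 + 245), the 450 mV floor corresponds to a VID of 41.
 */
#if 0
#include <assert.h>

static int decode_rc6_vid(int vid) { return vid * 5 + 245; }
static int encode_rc6_vid(int mv)  { return (mv - 245) / 5; }

static void rc6_vid_example(void)
{
	/* any VID decoding below 450 mV is rewritten to this floor */
	assert(encode_rc6_vid(450) == 41);
	assert(decode_rc6_vid(41) == 450);
}
#endif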
static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_freq_softlimit;
	     gpu_freq >= dev_priv->rps.min_freq_softlimit;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
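/*
 * Illustrative sketch, not part of the driver: the ring-frequency table loop
 * above, reduced to plain arithmetic for the pre-Haswell branch. The CPU
 * frequency, softlimits and ratios here are hypothetical; only the formula
 * (ia_freq backs off by scaling_factor/2 per 50 MHz GT bin, then rounds to
 * 100 MHz units) mirrors the code above.
 */
#if 0
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static void ring_freq_table_example(void)
{
	unsigned int max_ia_freq = 3400;	/* MHz, hypothetical CPU */
	int scaling_factor = 180, min_freq = 15;
	unsigned int max_sl = 22, min_sl = 11;	/* hypothetical 50 MHz units */
	unsigned int gpu_freq;

	for (gpu_freq = max_sl; gpu_freq >= min_sl; gpu_freq--) {
		int diff = max_sl - gpu_freq;
		unsigned int ia_freq;

		if (gpu_freq < min_freq)
			ia_freq = 800;	/* lowest ring freq below 750 MHz */
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		printf("gpu %u -> ia ratio %u\n", gpu_freq, ia_freq);
	}
}
#endif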
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
	rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;

	return rp1;
}

static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpn;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
	rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;

	return rpn;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
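/*
 * Illustrative sketch, not part of the driver: valleyview_rps_rpe_freq()
 * above splices a low bitfield and a high bitfield from two fuse registers
 * into a single RPe value. With hypothetical field positions it is just:
 */
#if 0
#include <stdint.h>

static uint32_t splice_rpe(uint32_t fuse_lo, uint32_t fuse_hi)
{
	/* hypothetical layout: 5 low bits in fuse_lo[12:8],
	 * the remaining bits in fuse_hi[2:0] */
	uint32_t rpe = (fuse_lo >> 8) & 0x1f;

	rpe |= (fuse_hi & 0x7) << 5;
	return rpe;
}
#endif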
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pctx_paddr, paddr;
	struct i915_gtt *gtt = &dev_priv->gtt;
	u32 pcbr;
	int pctx_size = 32*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		paddr = (dev_priv->mm.stolen_base +
			 (gtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}
}

static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		return;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}
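/*
 * Illustrative sketch, not part of the driver: the power context must sit in
 * stolen memory on a 4 KiB boundary. cherryview_setup_pctx() above simply
 * carves the top pctx_size bytes out of stolen, which reduces to:
 */
#if 0
#include <stdint.h>

static uint32_t place_pctx(uint32_t stolen_base, uint32_t stolen_size,
			   uint32_t pctx_size)
{
	uint32_t paddr = stolen_base + (stolen_size - pctx_size);

	return paddr & ~4095u;	/* the address written to VLV_PCBR */
}
#endif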
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	cherryview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = 200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = 267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = 333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = 320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = 400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen6_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
	}
}

static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}

static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx == NULL)
		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.renderctx)
		return -ENOMEM;

	if (dev_priv->ips.pwrctx == NULL)
		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}
static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		dev_priv->mm.interruptible = was_interruptible;
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);

	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
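/*
 * Illustrative sketch, not part of the driver: a worked example for
 * intel_pxfreq() above. A hypothetical vidfreq encoding div = 30, post = 1,
 * pre = 1 decodes to (30 * 133333) / ((1 << 1) * 1) = 1999995, i.e. ~2 GHz
 * expressed in kHz.
 */
#if 0
static unsigned long pxfreq_example(void)
{
	u32 vidfreq = (30 << 16) | (1 << 12) | 1; /* hypothetical register value */

	return intel_pxfreq(vidfreq); /* 1999995 */
}
#endif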
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
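/*
 * Illustrative sketch, not part of the driver: the energy counters read above
 * are free-running and can wrap, so __i915_chipset_val() computes the delta
 * modulo the counter width instead of a plain subtraction:
 */
#if 0
#include <stdint.h>

static uint64_t counter_delta(uint64_t last, uint64_t cur)
{
	if (cur < last)	/* counter wrapped since the last sample */
		return (~0UL - last) + cur;
	return cur - last;
}
#endif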
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* ... fused voltage table (one entry per PXVID code) elided ... */
	};

	if (INTEL_INFO(dev)->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
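/*
 * Illustrative sketch, not part of the driver: i915_mch_val() above is a
 * fixed-point linear fit, value = (slope * reading) / 127 - intercept. For
 * hypothetical register contents giving m = 120, x = 80, b = 25 this returns
 * (120 * 80) / 127 - 25 = 75 - 25 = 50.
 */
#if 0
static unsigned long mch_val_example(void)
{
	unsigned long m = 120, x = 80, b = 25; /* hypothetical values */

	return ((m * x) / 127) - b; /* 50 */
}
#endif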
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *ring;
	bool ret = false;
	int i;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
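/*
 * Illustrative sketch, not part of the driver: the same symbol_get() pattern
 * reduced to its skeleton, for calling into another module that may or may
 * not be loaded, without a hard link-time dependency. other_module_hook is a
 * hypothetical exported symbol.
 */
#if 0
extern void other_module_hook(void);

static void ping_other_module(void)
{
	void (*hook)(void);

	hook = symbol_get(other_module_hook);	/* NULL if not loaded */
	if (hook) {
		hook();
		symbol_put(other_module_hook);	/* drop the module reference */
	}
}
#endif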
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
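/*
 * Illustrative sketch, not part of the driver: the PXW writes above pack the
 * sixteen byte-sized P-state weights into four 32-bit registers, four bytes
 * per register, most significant byte first:
 */
#if 0
#include <stdint.h>

static void pack_weights(const uint8_t w[16], uint32_t regs[4])
{
	int i;

	for (i = 0; i < 4; i++)
		regs[i] = (w[i * 4] << 24) | (w[i * 4 + 1] << 16) |
			  (w[i * 4 + 2] << 8) | w[i * 4 + 3];
}
#endif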
void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}
/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	cancel_work_sync(&dev_priv->rps.work);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}
void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);
		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	dev_priv->rps.is_bdw_sw_turbo = false;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}
void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->rps.enabled = false;
	intel_enable_gt_powersave(dev);
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_primary_plane(dev_priv, pipe);
	}
}
static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch:snb */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* FIXME(BDW): Check all the w/a, some might only apply to
	 * pre-production hw. */

	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));

	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	lpt_init_clock_gating(dev);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaDisablePartialInstShootdown:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));

	/* WaDisableGunitClockGating:chv (pre-production hw) */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
		   GINT_DIS);

	/* WaDisableFfDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
}
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
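
/*
 * Illustrative sketch (not compiled): how the iterators above expand. The
 * hypothetical helper visits every well whose domain mask covers the audio
 * domain, in enable order; the real consumers are
 * intel_display_power_get()/put() further down.
 */
#if 0
static void example_walk_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(POWER_DOMAIN_AUDIO),
			    power_domains)
		DRM_DEBUG_KMS("well '%s' covers the audio domain\n",
			      power_well->name);
}
#endif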
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
					  enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = intel_display_power_enabled_unlocked(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
6231 * Starting with Haswell, we have a "Power Down Well" that can be turned off
6232 * when not needed anymore. We have 4 registers that can request the power well
6233 * to be enabled, and it will only be disabled if none of the registers is
6234 * requesting it to be enabled.
6236 static void hsw_power_well_post_enable(struct drm_i915_private
*dev_priv
)
6238 struct drm_device
*dev
= dev_priv
->dev
;
6241 * After we re-enable the power well, if we touch VGA register 0x3d5
6242 * we'll get unclaimed register interrupts. This stops after we write
6243 * anything to the VGA MSR register. The vgacon module uses this
6244 * register all the time, so if we unbind our driver and, as a
6245 * consequence, bind vgacon, we'll get stuck in an infinite loop at
6246 * console_unlock(). So make here we touch the VGA MSR register, making
6247 * sure vgacon can keep working normally without triggering interrupts
6248 * and error messages.
6250 vga_get_uninterruptible(dev
->pdev
, VGA_RSRC_LEGACY_IO
);
6251 outb(inb(VGA_MSR_READ
), VGA_MSR_WRITE
);
6252 vga_put(dev
->pdev
, VGA_RSRC_LEGACY_IO
);
6254 if (IS_BROADWELL(dev
))
6255 gen8_irq_power_well_post_enable(dev_priv
);
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);
}
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
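
/*
 * Illustrative sketch (not compiled) of the intended get/put pairing: a
 * hypothetical caller takes a reference on a domain around a hardware
 * access, and the wells backing that domain stay powered until the last
 * reference is dropped (and only power down if i915.disable_power_well
 * allows it).
 */
#if 0
static void example_domain_access(struct drm_i915_private *dev_priv)
{
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	/* ... touch registers that live in the audio domain ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
#endif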
static struct i915_power_domains *hsw_pwr;

/* Display audio driver power well request */
int i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);

/* Display audio driver power well release */
int i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * Caller must request power well using i915_request_power_well() prior to
 * making the call.
 */
int i915_get_cdclk_freq(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
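
/*
 * Illustrative sketch (not compiled) of the call sequence expected from the
 * audio driver side, per the comment above: request the well, query CDCLK,
 * release the well. The function name is hypothetical.
 */
#if 0
static int example_audio_get_cdclk(void)
{
	int cdclk;

	if (i915_request_power_well())
		return -ENODEV;

	cdclk = i915_get_cdclk_freq();

	i915_release_power_well();

	return cdclk;
}
#endif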
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
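
/*
 * Worked example (not compiled): on HSW the "display" well covers exactly
 * the complement of the always-on set, so e.g. BIT(POWER_DOMAIN_PIPE_B) is
 * in HSW_DISPLAY_POWER_DOMAINS while BIT(POWER_DOMAIN_PIPE_A) is not. Both
 * sets include BIT(POWER_DOMAIN_INIT), so a get on the INIT domain matches
 * both wells.
 */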
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "pipe-a",
		.domains = CHV_PIPE_A_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
};
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 enum punit_power_well power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}
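
/*
 * Illustrative sketch (not compiled) of the runtime PM pattern set up above:
 * every hardware access is bracketed by get/put, and the device autosuspends
 * 10 seconds after the last put, assuming RC6 is available. The wrapper name
 * is hypothetical.
 */
#if 0
static void example_runtime_pm_bracket(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);	/* resumes the device if suspended */
	/* ... access the hardware ... */
	intel_runtime_pm_put(dev_priv);	/* re-arms the autosuspend timer */
}
#endif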
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FBC(dev)) {
		if (INTEL_INFO(dev)->gen >= 7) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = gen7_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (INTEL_INFO(dev)->gen >= 5) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;

			/* This value was pulled out of someone's hat */
			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
		}
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
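
/*
 * Usage sketch (not compiled) for the mailbox helpers above: callers hold
 * rps.hw_lock and pass a mailbox id plus a value buffer. The round-trip
 * helper and its mbox parameter are hypothetical.
 */
#if 0
static int example_pcode_roundtrip(struct drm_i915_private *dev_priv, u8 mbox)
{
	u32 val = 0;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, mbox, &val);
	if (ret == 0)
		ret = sandybridge_pcode_write(dev_priv, mbox, val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}
#endif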
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
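
/*
 * Worked example (not compiled) for the two conversions above, assuming the
 * 1333 MHz mem_freq case (divider 16): opcode 0xbd is the reference point,
 * so byt_gpu_freq(0xbd) = DIV_ROUND_CLOSEST(1333 * 6, 64) = 125 MHz, and
 * byt_freq_opcode(125) = DIV_ROUND_CLOSEST(8000, 1333) + 0xbd - 6 = 0xbd
 * again, i.e. the two functions round-trip.
 */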
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, freq;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		div = 5;
		break;
	case 267:
		div = 10;
		break;
	case 320:
	case 333:
	case 400:
		div = 16;
		break;
	default:
		return -1;
	}

	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);

	return freq;
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, opcode;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		mul = 5;
		break;
	case 267:
		mul = 10;
		break;
	case 320:
	case 333:
	case 400:
		mul = 16;
		break;
	default:
		return -1;
	}

	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);

	return opcode;
}
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
	dev_priv->pm._irqs_disabled = false;
}