/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * the voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
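/*
 * Illustrative example (not tied to any specific platform): a part that
 * supports plain RC6 and deep RC6 but not RC6pp would use the mask
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), i.e. 0x3, so the deepest state
 * the hardware may then reach is RC6p.
 */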
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * WaDisableSDEUnitClockGating:skl
	 * This seems to be a pre-production w/a.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
	 * This is a pre-production w/a.
	 */
	I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
		   I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
		   ~GEN9_DG_MIRROR_FIX_ENABLE);

	/* Wa4x4STCOptimizationDisable:skl */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
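/*
 * Worked DSPARB decode (hypothetical register value): on an i9xx part
 * with DSPARB == 0x0000220e, plane A's FIFO size is the low field
 * (0x0e == 14 entries), and plane B's is the C-start field minus that:
 * assuming DSPARB_CSTART_SHIFT == 7, ((0x220e >> 7) & 0x7f) - 14 == 54.
 * The i830/i845 variants differ only in field widths and in shifting
 * the result down to cacheline units.
 */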
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
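/*
 * Worked example with illustrative numbers: a 100000 kHz (100 MHz) pixel
 * clock at 4 bytes/pixel and latency_ns == 5000 drains
 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes during the latency
 * window; with a 64-byte cacheline that is DIV_ROUND_UP(2000, 64) == 32
 * FIFO entries, so a 96-entry FIFO with guard_size == 2 yields a
 * watermark level of 96 - (32 + 2) = 62.
 */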
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
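/*
 * Illustrative cursor math for the large buffer method above: with
 * htotal == 2200 and clock == 148500 kHz, line_time_us ==
 * max(2200 * 1000 / 148500, 1) == 14, so a 5000 ns cursor latency gives
 * line_count == (5000 / 14 + 1000) / 1000 == 1 line; a 64-pixel-wide
 * cursor at 4 bytes/pixel then needs 1 * 64 * 4 == 256 bytes before
 * rounding up to cachelines and adding the guard size.
 */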
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	struct drm_device *dev = crtc->dev;
	int entries;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	if (IS_CHERRYVIEW(dev))
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
					       DRAIN_LATENCY_PRECISION_16;
	else
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
					       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}
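/*
 * Worked example (illustrative numbers): clock == 148500 kHz at
 * 4 bytes/pixel gives entries == DIV_ROUND_UP(148500, 1000) * 4 == 596.
 * On VLV that selects the 64x precision multiplier (entries > 128), so
 * drain_latency == (64 * 64 * 4) / 596 == 27, which is below
 * DRAIN_LATENCY_MASK and therefore not clamped.
 */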
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_PLANE_PRECISION_HIGH :
					   DDL_PLANE_PRECISION_LOW;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_CURSOR_PRECISION_HIGH :
					   DDL_CURSOR_PRECISION_LOW;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_SPRITE_PRECISION_HIGH(sprite) :
					   DDL_SPRITE_PRECISION_LOW(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config->pch_pfit.size;

		pipe_w = intel_crtc->config->pipe_src_w;
		pipe_h = intel_crtc->config->pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
struct skl_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate; /* in KHz */
	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
	struct intel_plane_wm_parameters cursor;
};

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
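/*
 * Example (illustrative): on a single-pipe HSW config with sprites
 * enabled and 5:6 DDB partitioning at an LP level, a sprite gets 5/6 of
 * the 768-entry FIFO and the primary plane the remaining 1/6; with 1:1
 * partitioning each side simply gets fifo_size / 2.
 */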
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
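/*
 * Unit example (illustrative): a WM2 pri_latency of 4 is stored in
 * 0.5 us units, so the multiply by 5 above turns it into 20, i.e.
 * 2.0 us in the 0.1 us units that ilk_wm_method1/2 expect.
 */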
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The WM are computed with base on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 * */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - W0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >=1, punit returns 0us latency when they are
		 *   disabled, so we respect that and don't add 2us then
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}
int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_GEN9(dev))
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
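/*
 * Quirk math (illustrative): the floor of 12 applies to WM0's 0.1 us
 * units, i.e. 1.2 us, and ilk_increase_wm_latency() raises WM1+ (stored
 * in 0.5 us units) to at least DIV_ROUND_UP(12, 5) == 3, i.e. 1.5 us.
 */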
static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}
static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	if (!intel_crtc_active(crtc))
		return;

	p->active = true;
	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
	p->cur.bytes_per_pixel = 4;
	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
	p->cur.horiz_pixels = intel_crtc->cursor_width;
	/* TODO: for now, assume primary and cursor planes are always enabled. */
	p->pri.enabled = true;
	p->cur.enabled = true;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe) {
			p->spr = intel_plane->wm;
			break;
		}
	}
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *intel_crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	pipe_wm->pipe_enabled = params->active;
	pipe_wm->sprites_enabled = params->spr.enabled;
	pipe_wm->sprites_scaled = params->spr.scaled;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
		return false;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level wm = {};

		ilk_compute_wm_level(dev_priv, level, params, &wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, &wm))
			break;

		pipe_wm->wm[level] = wm;
	}

	return true;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		return;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

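/*
 * Worked example (illustrative, not part of the original code): when the
 * platform exposes five watermark levels and wm[4] is enabled, the mapping
 * above yields LP1->1, LP2->3, LP3->4, since the boolean promotes to 1 for
 * wm_lp >= 2. When wm[4] is disabled it degenerates to the plain LP1->1,
 * LP2->2, LP3->3 mapping.
 */
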
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
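
/*
 * Layout sketch (illustrative): per-pipe bits live in [0:2], linetime bits
 * in [8:10], LP bits in [16:18], FBC in bit 24 and DDB in bit 25. For
 * example, a change on pipe B together with a new LP2 value would set
 * WM_DIRTY_PIPE(PIPE_B) | WM_DIRTY_LP(2) == (1 << 1) | (1 << 17).
 */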

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   struct drm_crtc *for_crtc,
				   const struct intel_wm_config *config,
				   const struct skl_pipe_wm_parameters *params,
				   struct skl_ddb_entry *alloc /* out */)
{
	struct drm_crtc *crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (!params->active) {
		alloc->start = 0;
		alloc->end = 0;
		return;
	}

	ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	nth_active_pipe = 0;
	for_each_crtc(dev, crtc) {
		if (!intel_crtc_active(crtc))
			continue;

		if (crtc == for_crtc)
			break;

		nth_active_pipe++;
	}

	pipe_size = ddb_size / config->num_pipes_active;
	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
	alloc->end = alloc->start + pipe_size;
}
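
/*
 * Worked example (illustrative, not part of the original code): with
 * SKL_DDB_SIZE = 896 and 4 blocks reserved for the bypass path, ddb_size
 * is 892. With two active pipes each pipe gets 892 / 2 = 446 blocks: the
 * first active pipe is assigned [0, 446) and the second [446, 892).
 */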

static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
{
	if (config->num_pipes_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}
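
/*
 * Example decode (illustrative): reg = 0x01bd0000 gives start = 0 and a
 * raw end field of 0x1bd (445); the +1 above turns the inclusive hardware
 * end into the exclusive end 446, i.e. an allocation of 446 blocks.
 * skl_ddb_entry_write() later in this file performs the inverse encoding.
 */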

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_device *dev = dev_priv->dev;
	enum pipe pipe;
	int plane;
	u32 val;

	for_each_pipe(dev_priv, pipe) {
		for_each_plane(pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
	}
}

static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
{
	return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
				 const struct skl_pipe_wm_parameters *params)
{
	unsigned int total_data_rate = 0;
	int plane;

	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		total_data_rate += skl_plane_relative_data_rate(p);
	}

	return total_data_rate;
}
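
/*
 * Checking the worst case quoted above (illustrative arithmetic):
 * 3 * 4096 * 8192 * 4 = 402,653,184, comfortably below
 * 2^32 = 4,294,967,296, so the unsigned int accumulation cannot overflow.
 */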

static void
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
		      const struct intel_wm_config *config,
		      const struct skl_pipe_wm_parameters *params,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
	uint16_t alloc_size, start, cursor_blocks;
	unsigned int total_data_rate;
	int plane;

	skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
		return;
	}

	cursor_blocks = skl_cursor_allocation(config);
	ddb->cursor[pipe].start = alloc->end - cursor_blocks;
	ddb->cursor[pipe].end = alloc->end;

	alloc_size -= cursor_blocks;
	alloc->end -= cursor_blocks;

	/*
	 * Each active plane gets a portion of the remaining space, in
	 * proportion to the amount of data it needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);

	start = alloc->start;
	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;
		unsigned int data_rate;
		uint16_t plane_blocks;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		data_rate = skl_plane_relative_data_rate(p);

		/*
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
				       total_data_rate);

		ddb->plane[pipe][plane].start = start;
		ddb->plane[pipe][plane].end = start + plane_blocks;

		start += plane_blocks;
	}
}

static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and bytes_per_pixel should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}
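
/*
 * Worked example (illustrative, assuming a 337.5 MHz pixel clock, i.e.
 * pixel_rate = 337500 kHz, 4 bytes per pixel and a 15 us latency):
 * wm_intermediate_val = 15 * 337500 * 4 = 20,250,000, so the result is
 * DIV_ROUND_UP(20250000, 1000) = 20250 bytes fetched during the latency
 * window.
 */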

static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret, plane_bytes_per_line, wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
		plane_bytes_per_line;

	return ret;
}
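
/*
 * Worked example (illustrative, assuming pixel_rate = 337500 kHz,
 * pipe_htotal = 2250, horiz_pixels = 1920, 4 bytes per pixel and a 15 us
 * latency): DIV_ROUND_UP(15 * 337500, 2250 * 1000) = 3 lines pass during
 * the latency window, and 3 * 1920 * 4 = 23040 bytes must be buffered.
 */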

static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
				       const struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	enum pipe pipe = intel_crtc->pipe;

	if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
		   sizeof(new_ddb->plane[pipe])))
		return true;

	if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
		   sizeof(new_ddb->cursor[pipe])))
		return true;

	return false;
}

static void skl_compute_wm_global_parameters(struct drm_device *dev,
					     struct intel_wm_config *config)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		config->num_pipes_active += intel_crtc_active(crtc);

	/* FIXME: I don't think we need those two global parameters on SKL */
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		config->sprites_enabled |= intel_plane->wm.enabled;
		config->sprites_scaled |= intel_plane->wm.scaled;
	}
}

static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
					   struct skl_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;
	int i = 1; /* Index for sprite planes start */

	p->active = intel_crtc_active(crtc);
	if (p->active) {
		p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
		p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);

		/*
		 * For now, assume primary and cursor planes are always enabled.
		 */
		p->plane[0].enabled = true;
		p->plane[0].bytes_per_pixel =
			crtc->primary->fb->bits_per_pixel / 8;
		p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
		p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;

		p->cursor.enabled = true;
		p->cursor.bytes_per_pixel = 4;
		p->cursor.horiz_pixels = intel_crtc->cursor_width ?
					 intel_crtc->cursor_width : 64;
	}

	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe &&
			plane->type == DRM_PLANE_TYPE_OVERLAY)
			p->plane[i++] = intel_plane->wm;
	}
}

static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
				 struct intel_plane_wm_parameters *p_params,
				 uint16_t ddb_allocation,
				 uint32_t mem_value,
				 uint16_t *out_blocks, /* out */
				 uint8_t *out_lines /* out */)
{
	uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
	uint32_t result_bytes;

	if (mem_value == 0 || !p->active || !p_params->enabled)
		return false;

	method1 = skl_wm_method1(p->pixel_rate,
				 p_params->bytes_per_pixel,
				 mem_value);
	method2 = skl_wm_method2(p->pixel_rate,
				 p->pipe_htotal,
				 p_params->horiz_pixels,
				 p_params->bytes_per_pixel,
				 mem_value);

	plane_bytes_per_line = p_params->horiz_pixels *
			       p_params->bytes_per_pixel;

	/* For now xtile and linear */
	if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
		result_bytes = min(method1, method2);
	else
		result_bytes = method1;

	res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
	res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);

	if (res_blocks > ddb_allocation || res_lines > 31)
		return false;

	*out_blocks = res_blocks;
	*out_lines = res_lines;

	return true;
}

static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
				 struct skl_ddb_allocation *ddb,
				 struct skl_pipe_wm_parameters *p,
				 enum pipe pipe,
				 int level,
				 int num_planes,
				 struct skl_wm_level *result)
{
	uint16_t latency = dev_priv->wm.skl_latency[level];
	uint16_t ddb_blocks;
	int i;

	for (i = 0; i < num_planes; i++) {
		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
						ddb_blocks,
						latency,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}

	ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
	result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
						 latency, &result->cursor_res_b,
						 &result->cursor_res_l);
}

static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
	if (!intel_crtc_active(crtc))
		return 0;

	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
}
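
/*
 * Worked example (illustrative): for a 1920x1080@60 mode with
 * pipe_htotal = 2200 and pixel_rate = 148500 kHz, one line takes
 * 2200 / 148500 kHz ~= 14.8 us, so the value returned is
 * DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. roughly the line
 * time expressed in 1/8 us units.
 */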

static void skl_compute_transition_wm(struct drm_crtc *crtc,
				      struct skl_pipe_wm_parameters *params,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int i;

	if (!params->active)
		return;

	/* Until we know more, just disable transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		trans_wm->plane_en[i] = false;
	trans_wm->cursor_en = false;
}

static void skl_compute_pipe_wm(struct drm_crtc *crtc,
				struct skl_ddb_allocation *ddb,
				struct skl_pipe_wm_parameters *params,
				struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
				     level, intel_num_planes(intel_crtc),
				     &pipe_wm->wm[level]);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);

	skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
}

static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm_parameters *p,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].cursor_res_b;

		if (p_wm->wm[level].cursor_en)
			temp |= PLANE_WM_EN;

		r->cursor[pipe][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.cursor_res_b;
	if (p_wm->trans_wm.cursor_en)
		temp |= PLANE_WM_EN;

	r->cursor_trans[pipe] = temp;

	r->wm_linetime[pipe] = p_wm->linetime;
}

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if (!new->dirty[pipe])
			continue;

		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);

		for (level = 0; level <= max_level; level++) {
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM(pipe, i, level),
					   new->plane[pipe][i][level]);
			I915_WRITE(CUR_WM(pipe, level),
				   new->cursor[pipe][level]);
		}
		for (i = 0; i < intel_num_planes(crtc); i++)
			I915_WRITE(PLANE_WM_TRANS(pipe, i),
				   new->plane_trans[pipe][i]);
		I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);

		for (i = 0; i < intel_num_planes(crtc); i++)
			skl_ddb_entry_write(dev_priv,
					    PLANE_BUF_CFG(pipe, i),
					    &new->ddb.plane[pipe][i]);

		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
				    &new->ddb.cursor[pipe]);
	}
}

/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previous light-up pipe (another way to put it is:
 *     pipes with their new allocation strictly included in their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	struct drm_device *dev = dev_priv->dev;
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}

static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = dev_priv->dev;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {false, false, false};
	struct intel_crtc *crtc;
	enum pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained into
	 * the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without this pipe fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
			reallocated[pipe] = true;
		}
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes that got more space than
		 * before are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}

static bool skl_update_pipe_wm(struct drm_crtc *crtc,
			       struct skl_pipe_wm_parameters *params,
			       struct intel_wm_config *config,
			       struct skl_ddb_allocation *ddb, /* out */
			       struct skl_pipe_wm *pipe_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	skl_compute_wm_pipe_parameters(crtc, params);
	skl_allocate_pipe_ddb(crtc, config, params, ddb);
	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);

	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.skl_active = *pipe_wm;

	return true;
}

static void skl_update_other_pipe_wm(struct drm_device *dev,
				     struct drm_crtc *crtc,
				     struct intel_wm_config *config,
				     struct skl_wm_values *r)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc *this_crtc = to_intel_crtc(crtc);

	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
	 * crtc we are currently computing the new WM values for), other
	 * enabled crtcs will keep the same allocation and we don't need to
	 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
	 * other active pipes need new DDB allocation and WM values.
	 */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
				base.head) {
		struct skl_pipe_wm_parameters params = {};
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&params, config,
						&r->ddb, &pipe_wm);

		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values to
		 * be different.
		 */
		WARN_ON(!wm_changed);

		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
		r->dirty[intel_crtc->pipe] = true;
	}
}

static void skl_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_pipe_wm_parameters params = {};
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct skl_pipe_wm pipe_wm = {};
	struct intel_wm_config config = {};

	memset(results, 0, sizeof(*results));

	skl_compute_wm_global_parameters(dev, &config);

	if (!skl_update_pipe_wm(crtc, &params, &config,
				&results->ddb, &pipe_wm))
		return;

	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
	results->dirty[intel_crtc->pipe] = true;

	skl_update_other_pipe_wm(dev, crtc, &config, results);
	skl_write_wm_values(dev_priv, results);
	skl_flush_wm_values(dev_priv, results);

	/* store the new configuration */
	dev_priv->wm.skl_hw = *results;
}

static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	skl_update_wm(crtc);
}

static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void
ilk_update_sprite_wm(struct drm_plane *plane,
		     struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}

static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].cursor_en = is_enabled;
			active->wm[level].cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.cursor_en = is_enabled;
			active->trans_wm.cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}

static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
	enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);

	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++)
			hw->plane[pipe][i][level] =
					I915_READ(PLANE_WM(pipe, i, level));
		hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
	hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));

	if (!intel_crtc_active(crtc))
		return;

	hw->dirty[pipe] = true;

	active->linetime = hw->wm_linetime[pipe];

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						false, i, level);
		}
		temp = hw->cursor[pipe][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = hw->plane_trans[pipe][i];
		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
	}

	temp = hw->cursor_trans[pipe];
	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
}

void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		skl_pipe_wm_get_hw_state(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	active->pipe_enabled = intel_crtc_active(crtc);

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}

void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
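
/*
 * Worked example for the SR formula above (illustrative, assuming a
 * 1920-wide plane, htotal = 2200, a 148.5 MHz dotclock, 4 bytes per pixel
 * and a 30 us latency): line time = 2200 / 148.5 MHz ~= 14.8 us, so
 * watermark = (trunc(30 / 14.8) + 1) * 1920 * 4 = 3 * 7680 = 23040 bytes.
 */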

void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width,
				    uint32_t sprite_height,
				    int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc,
						   sprite_width, sprite_height,
						   pixel_size, enabled, scaled);
}

static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_ggtt_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}

/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	limits = dev_priv->rps.max_freq_softlimit << 24;
	if (val <= dev_priv->rps.min_freq_softlimit)
		limits |= dev_priv->rps.min_freq_softlimit << 16;

	return limits;
}
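
/*
 * Encoding example (illustrative): with max_freq_softlimit = 0x16
 * (22 * 50 MHz = 1100 MHz) and min_freq_softlimit = 0x0b (550 MHz), a
 * request for the minimum frequency produces
 * limits = (0x16 << 24) | (0x0b << 16) = 0x160b0000, while any higher
 * request leaves the down-limit field clear (0x16000000).
 */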

static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val == dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val == dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		I915_WRITE(GEN6_RP_UP_EI, 12500);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);

		/* Downclock if less than 85% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		I915_WRITE(GEN6_RP_UP_EI, 10250);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);

		/* Downclock if less than 75% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		I915_WRITE(GEN6_RP_UP_EI, 8000);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);

		/* Downclock if less than 60% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;
	}

	dev_priv->rps.power = new_power;
	dev_priv->rps.last_adj = 0;
}
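
/*
 * Unit check (illustrative): in LOW_POWER the up evaluation interval of
 * 12500 corresponds to 12500 * 1280 ns = 16 ms, and the 11800 threshold
 * is 11800 / 12500 = 94.4% busy, i.e. roughly the 95% figure quoted in
 * the comment above.
 */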

static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_THRESHOLD;

	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}

/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 *
 * If Gfx is idle, then
 * 1. Mask Turbo interrupts
 * 2. Bring up Gfx clock
 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 * 4. Clear the Force GFX CLK ON bit so that the Gfx clock can go down
 * 5. Unmask Turbo interrupts
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* Latest VLV doesn't need to force the gfx clock */
	if (dev->pdev->revision >= 0xd) {
		valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		return;
	}

	/*
	 * When we are idle, drop to the min voltage state.
	 */

	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
		return;

	/* Mask turbo interrupts so that they will not come in between */
	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	vlv_force_gfx_clock(dev_priv, true);

	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;

	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
			dev_priv->rps.min_freq_softlimit);

	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
				& GENFREQSTATUS) == 0, 100))
		DRM_ERROR("timed out waiting for Punit\n");

	vlv_force_gfx_clock(dev_priv, false);

	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_CHERRYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		else if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	if (val != dev_priv->rps.cur_freq)
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}

static void gen9_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
}

static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
	else
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}

static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake */
	if (INTEL_INFO(dev)->gen < 5)
		return 0;

	/* RC6 is only on Ironlake mobile not on desktop */
	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
		return 0;

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	/* Disable RC6 on Ironlake */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}

int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}

static void gen6_init_rps_frequencies(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t rp_state_cap;
	u32 ddcc_status = 0;
	int ret;

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq = 0;
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
	dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = sandybridge_pcode_read(dev_priv,
					     HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					     &ddcc_status);
		if (0 == ret)
			dev_priv->rps.efficient_freq =
				(ddcc_status >> 8) & 0xff;
	}

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			dev_priv->rps.min_freq_softlimit =
				/* max(RPe, 450 MHz) */
				max(dev_priv->rps.efficient_freq, (u8) 9);
		else
			dev_priv->rps.min_freq_softlimit =
				dev_priv->rps.min_freq;
	}
}
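
/*
 * Worked example of the 50 MHz units above (illustrative values only): an
 * RP0 field of 22 corresponds to 22 * 50 = 1100 MHz, and the (u8) 9 clamp
 * on the min softlimit corresponds to 9 * 50 = 450 MHz, matching the
 * "max(RPe, 450 MHz)" comment.
 */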
static void gen9_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	/* 3a: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
			"on" : "off");
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				    GEN6_RC_CTL_EI_MODE(1) |
				    rc6_mask);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
					    GEN7_RC_CTL_TO_MODE |
					    rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
					    GEN6_RC_CTL_EI_MODE(1) |
					    rc6_mask);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
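
/*
 * Unit sanity check for the thresholds above: GEN6_RC6_THRESHOLD counts in
 * 1.28 us ticks, so the Broadwell value of 625 is 625 * 1.28 = 800 us (the
 * "800us/1.28 for TO" comment), and the 100000000 / 128 down-timeout is
 * 781250 ticks, i.e. almost exactly 1 second.
 */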
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
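
/*
 * Note (added for clarity): GEN6_DECODE_RC6_VID converts the PCODE-reported
 * RC6 voltage code to mV, so the fix-up above rewrites BIOSes that program
 * the minimum RC6 voltage below the 450 mV the driver enforces on SNB.
 */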
static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
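
/*
 * Rough example of the table built above (pre-Haswell path, illustrative
 * numbers only): with max_ia_freq = 3000 MHz and scaling_factor = 180, a
 * GPU frequency 10 steps below max gives ia_freq = 3000 - (10 * 180) / 2 =
 * 2100 MHz, encoded as 21 by DIV_ROUND_CLOSEST(ia_freq, 100).
 */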
void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val, rp0;

	if (dev->pdev->revision >= 0x20) {
		val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

		switch (INTEL_INFO(dev)->eu_total) {
		case 8:
			/* (2 * 4) config */
			rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
			break;
		case 12:
			/* (2 * 6) config */
			rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
			break;
		case 16:
			/* (2 * 8) config */
		default:
			/* Setting (2 * 8) Min RP0 for any other combination */
			rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
			break;
		}
		rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
	} else {
		/* For pre-production hardware */
		val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
		rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
		      PUNIT_GPU_STATUS_MAX_FREQ_MASK;
	}

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val, rp1;

	if (dev->pdev->revision >= 0x20) {
		val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
		rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
	} else {
		/* For pre-production hardware */
		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
		       PUNIT_GPU_STATUS_MAX_FREQ_MASK);
	}

	return rp1;
}

static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val, rpn;

	if (dev->pdev->revision >= 0x20) {
		val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
		rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
		       FB_GFX_FREQ_FUSE_MASK);
	} else { /* For pre-production hardware */
		val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
		rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
		       PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
	}

	return rpn;
}
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pctx_paddr, paddr;
	struct i915_gtt *gtt = &dev_priv->gtt;
	u32 pcbr;
	int pctx_size = 32*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (gtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		return;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}
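
/*
 * Note (added for clarity): the power context (PCTX) lives in stolen memory
 * and is what the hardware saves/restores around RC6 on VLV; VLV_PCBR just
 * holds its physical address, which is why the setup above either adopts
 * the BIOS allocation or carves one out of stolen memory itself.
 */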
static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	cherryview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->dpio_lock);

	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = 200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = 267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = 333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = 320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = 400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
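
/*
 * Note the double condition in step 3 above: on CHV, RC6 is only turned on
 * when both the module parameter allows it and the BIOS-populated PCBR is
 * non-zero, since without a valid power context the hardware has nowhere
 * to save state on RC6 entry.
 */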
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
	}
}

static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}
static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx == NULL)
		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.renderctx)
		return -ENOMEM;

	if (dev_priv->ips.pwrctx == NULL)
		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}
static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		dev_priv->mm.interruptible = was_interruptible;
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);

	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
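
/*
 * Example decode (illustrative value only): vidfreq = 0x120001 gives
 * div = 18, post = 0, pre = 1, so freq = (18 * 133333) / ((1 << 0) * 1)
 * = ~2400000. A pre field of 0 would divide by zero, hence the !pre
 * bail-out above.
 */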
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
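
/*
 * The m/c pairs in cparams[] behave as the slope/intercept of a linear fit
 * from raw energy-count deltas to power. Illustrative arithmetic: with
 * m = 301 and c = 28664 (the first table entry), a count delta of 1000 per
 * millisecond yields ((301 * 1000) + 28664) / 10 = 32966 internal units.
 */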
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
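
/*
 * Example of the conversion above: _pxvid_to_vd(16) falls in the 8..30
 * range, is clamped to 31, and returns (31 + 2) * 125 = 4125;
 * pvid_to_extvid() then subtracts 1125 on mobile parts, flooring at 0.
 */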
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *ring;
	bool ret = false;
	int i;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
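
/*
 * Note (added for clarity): symbol_get() only resolves and takes a
 * reference on the symbol if intel_ips is already loaded, so neither module
 * needs a hard dependency on the other; if IPS is absent the lookup simply
 * returns NULL and the call is skipped.
 */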
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
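
/*
 * Example of the weight math above (illustrative values only): vid = 50 and
 * freq = 1000000 give val = 50 * 50 * (1000000 / 1000) * 255 /
 * (127 * 127 * 900) = 637500000 / 14516100 = ~43, well inside the 0xff
 * limit that triggers the "bad pxval" error.
 */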
void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}

static void gen6_suspend_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	/*
	 * TODO: disable RPS interrupts on GEN9+ too once RPS support
	 * is added for it.
	 */
	if (INTEL_INFO(dev)->gen < 9)
		gen6_disable_rps_interrupts(dev);
}
/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (INTEL_INFO(dev)->gen >= 9)
			gen9_disable_rps(dev);
		else if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);

		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	/*
	 * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
	 * added for it.
	 */
	if (INTEL_INFO(dev)->gen < 9)
		gen6_reset_rps_interrupts(dev);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;

	if (INTEL_INFO(dev)->gen < 9)
		gen6_enable_rps_interrupts(dev);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}
void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);
	dev_priv->rps.enabled = false;
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_primary_plane(dev_priv, pipe);
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	lpt_init_clock_gating(dev);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

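/*
 * The if (0) block above is intentionally dead code: enabling the HiZ
 * Raw Stall Optimization corrupts HiZ on ivb:gt1 (see the inline
 * comment), and the write is presumably kept in the source so the
 * otherwise-recommended setting stays documented rather than being
 * silently dropped.
 */
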
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING
	 */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

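/*
 * The platform-specific routines above are reached only through this
 * unconditional indirect call, so intel_init_pm() must have run first
 * to populate dev_priv->display.init_clock_gating; each supported
 * platform branch there is expected to install one (unlike update_wm,
 * which can legitimately be left NULL).
 */
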
void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		dev_priv->display.init_clock_gating = gen9_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

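/*
 * A minimal sketch of how the hooks installed above are consumed
 * (illustrative, not a verbatim call site):
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(crtc);
 *
 * update_wm is deliberately left NULL on Pineview when no known CxSR
 * latency matches, so callers must tolerate a missing hook.
 */
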
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

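/*
 * Mailbox protocol as implemented above: with rps.hw_lock held, bail
 * out if the mailbox is still busy, write the request into the DATA
 * register(s), arm the mailbox with GEN6_PCODE_READY | mbox, then
 * poll until the ready bit clears and read the reply back from DATA.
 * A hedged usage sketch, mirroring callers elsewhere in this file:
 *
 *	u32 rc6vids = 0;
 *	int ret = sandybridge_pcode_read(dev_priv,
 *					 GEN6_PCODE_READ_RC6VIDS, &rc6vids);
 *	if (ret)
 *		... mailbox busy (-EAGAIN) or timed out (-ETIMEDOUT) ...
 */
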
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

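/*
 * The write side follows the same handshake but carries the payload
 * out rather than in; the trailing DATA clear just leaves the mailbox
 * clean for the next caller. Hedged usage sketch:
 *
 *	sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
 */
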
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}

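/*
 * Worked example of the BYT conversion pair (numbers assumed for
 * illustration): with mem_freq == 1600, czclk_freq is 400 and the
 * divider is 20, so opcode 0xc8 maps to
 *	DIV_ROUND_CLOSEST(400 * (0xc8 + 6 - 0xbd), 20) = 340 MHz,
 * and byt_freq_opcode(dev_priv, 340) inverts it:
 *	DIV_ROUND_CLOSEST(20 * 340, 400) + 0xbd - 6 = 0xc8.
 */
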
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = dev_priv->rps.cz_freq;

	div = vlv_gpu_freq_div(czclk_freq) / 2;
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = dev_priv->rps.cz_freq;

	mul = vlv_gpu_freq_div(czclk_freq) / 2;
	if (mul < 0)
		return mul;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}

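/*
 * Worked example for CHV (cz_freq assumed 320 for illustration): the
 * divider is 16 / 2 = 8, so chv_gpu_freq(dev_priv, 60) yields
 *	DIV_ROUND_CLOSEST(320 * 60, 2 * 8) / 2 = 600 MHz,
 * and chv_freq_opcode(dev_priv, 600) returns
 *	DIV_ROUND_CLOSEST(600 * 2 * 8, 320) * 2 = 60.
 * The trailing "* 2" is what enforces the even opcodes CHV requires.
 */
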
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}

int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
}
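/*
 * intel_pm_setup() only prepares state: rps.hw_lock is the mutex the
 * pcode helpers above assert on, and delayed_resume_work defers the
 * actual powersave enabling to intel_gen6_powersave_work so it can
 * run after resume has settled (a summary of intent, not of every
 * caller).
 */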