/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is allowed
 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is deepest RC6. Their support by hardware varies according to the
 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
 * which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1 << 0)
#define INTEL_RC6p_ENABLE			(1 << 1)
#define INTEL_RC6pp_ENABLE			(1 << 2)
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
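/*
 * Illustrative expansion (comment added, not from the original source):
 * FW_WM(wm, SR) becomes (((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the
 * token-pasted plane name selects the shift/mask pair for that watermark
 * field of the DSPFW registers.
 */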
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
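/*
 * Explanatory note (added): a FIFO start position is a 9-bit value split
 * across two registers; the macro merges the 8 low bits from DSPARB with
 * the single high bit from DSPARB2 into one integer.
 */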
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
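/*
 * Worked example (comment added, illustrative numbers only): at a 148500 kHz
 * pixel clock with 4 bytes per pixel and 5000 ns latency, the FIFO drains
 * (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes during the latency window,
 * i.e. DIV_ROUND_UP(2960, 64) = 47 cachelines. With a hypothetical 96-entry
 * FIFO and a guard size of 2, the watermark would be 96 - (47 + 2) = 47.
 */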
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
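/*
 * Note (added for clarity): the "small buffer" method above sizes the
 * watermark from the raw byte rate (clock * cpp) over the latency window,
 * while the "large buffer" method counts whole lines fetched during that
 * window (line_count * width * cpp). The plane watermark uses the former,
 * the cursor watermark the latter.
 */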
/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}
enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
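/*
 * Derivation (comment added for clarity): with pixel_rate in kHz and latency
 * in 0.1us units, latency * pixel_rate / (pipe_htotal * 10000) is the number
 * of complete lines scanned out during the latency window; the method then
 * charges (lines + 1) * horiz_pixels * cpp bytes and converts the result to
 * 64-byte FIFO cachelines.
 */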
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
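/*
 * Worked example (comment added, illustrative): two visible planes with
 * cpp 4 and cpp 2 split the 511-entry FIFO as 511*4/6 = 340 and
 * 511*2/6 = 170 entries; the single leftover entry is then handed out by
 * the remainder loop above, giving a final 341/170 split.
 */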
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}
static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
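/*
 * Derivation (comment added for clarity): pixel_rate (kHz) * cpp *
 * latency (0.1us units) / 10000 gives the number of bytes fetched during
 * the latency window; dividing by the 64-byte cacheline size and adding
 * 2 guard entries yields the watermark.
 */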
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
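/*
 * Explanatory note (added): pri_val is in 64-byte cachelines, so
 * pri_val * 64 / (horiz_pixels * cpp) converts the primary watermark into
 * whole display lines, which is the unit the FBC watermark field uses.
 */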
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate,
						     result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
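
/*
 * Note on units (cf. intel_print_wm_latency() below): WM0 latencies are
 * kept in 0.1us steps, while WM1+ values are stored in 0.5us steps, hence
 * the *5 above; e.g. a raw WM1 value of 4 means 2.0us, i.e. 20 once
 * scaled to 0.1us units.
 */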

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev,
			struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(dev_priv->cdclk_freq == 0))
		return 0;

	/* The WM are computed with base on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 dev_priv->cdclk_freq);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
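
/*
 * Worked example (illustrative): a 1920x1080 mode with crtc_htotal = 2200
 * and crtc_clock = 148500 kHz gives
 * linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119,
 * i.e. ~14.8us per scanline expressed in 1/8 us units.
 */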

static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - W0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >=1, punit returns 0us latency when they are
		 *   disabled, so we respect that and don't add 2us then
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
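
/*
 * On gen9 each 32-bit mailbox word packs four 8-bit latency values
 * (GEN9_MEM_LATENCY_LEVEL_MASK selects the low byte, the _1_5/_2_6/_3_7
 * shifts the higher bytes), so the two pcode reads above cover all
 * eight levels.
 */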

static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
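
/*
 * I.e. 8 watermark levels (0-7) on gen9+, 5 (0-4) on HSW/BDW,
 * 4 (0-3) on SNB/IVB and 3 (0-2) on ILK.
 */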

static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
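
/*
 * Example (illustrative): min = 12 (1.2us in WM0's 0.1us units, as used
 * by snb_wm_latency_quirk() below) raises WM1+ levels to at least
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5us in their 0.5us units.
 */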

static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}

static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}

static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.optimal.ilk;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->visible;
		pipe_wm->sprites_scaled = sprstate->visible &&
			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}

/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(dev);

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.optimal.ilk;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
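
/*
 * Resulting bit layout: bits 0-2 per-pipe WM0, bits 8-10 per-pipe
 * linetime, bits 16-18 LP1-LP3, bit 24 FBC, bit 25 DDB partitioning.
 */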

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */
#define BXT_DDB_SIZE		512

/*
 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
 * other universal planes are in indices 1..n. Note that this may leave unused
 * indices between the top "sprite" plane and the cursor.
 */
static int
skl_wm_plane_id(const struct intel_plane *plane)
{
	switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
	case DRM_PLANE_TYPE_CURSOR:
		return PLANE_CURSOR;
	case DRM_PLANE_TYPE_OVERLAY:
		return plane->plane + 1;
	default:
		MISSING_CASE(plane->base.type);
		return plane->plane;
	}
}

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   const struct intel_wm_config *config,
				   struct skl_ddb_entry *alloc /* out */)
{
	struct drm_crtc *for_crtc = cstate->base.crtc;
	struct drm_crtc *crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (!cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		return;
	}

	if (IS_BROXTON(dev))
		ddb_size = BXT_DDB_SIZE;
	else
		ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	nth_active_pipe = 0;
	for_each_crtc(dev, crtc) {
		if (!to_intel_crtc(crtc)->active)
			continue;

		if (crtc == for_crtc)
			break;

		nth_active_pipe++;
	}

	pipe_size = ddb_size / config->num_pipes_active;
	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
	alloc->end = alloc->start + pipe_size;
}
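
/*
 * Worked example (illustrative): on SKL, 896 - 4 = 892 usable blocks;
 * with two active pipes each gets 892 / 2 = 446 blocks, the second
 * pipe's slice starting at block 446.
 */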

static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
{
	if (config->num_pipes_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}
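
/*
 * Matches skl_ddb_entry_write() below: the register stores the start
 * block in bits 9:0 and the inclusive end block in bits 25:16, so the
 * exclusive 'end' used by the software tracking is the field plus one.
 */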

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum pipe pipe;
	int plane;
	u32 val;

	memset(ddb, 0, sizeof(*ddb));

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane(dev_priv, pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
					   val);

		intel_display_power_put(dev_priv, power_domain);
	}
}

static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t width = 0, height = 0;

	width = drm_rect_width(&intel_pstate->src) >> 16;
	height = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	/* for planar format */
	if (fb->pixel_format == DRM_FORMAT_NV12) {
		if (y)	/* y-plane data rate */
			return width * height *
				drm_format_plane_cpp(fb->pixel_format, 0);
		else	/* uv-plane data rate */
			return (width / 2) * (height / 2) *
				drm_format_plane_cpp(fb->pixel_format, 1);
	}

	/* for packed formats */
	return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_device *dev = intel_crtc->base.dev;
	const struct intel_plane *intel_plane;
	unsigned int total_data_rate = 0;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		const struct drm_plane_state *pstate = intel_plane->base.state;

		if (pstate->fb == NULL)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* packed/uv */
		total_data_rate += skl_plane_relative_data_rate(cstate,
								pstate,
								0);

		if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
			/* y-plane */
			total_data_rate += skl_plane_relative_data_rate(cstate,
									pstate,
									1);
	}

	return total_data_rate;
}

static void
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_wm_config *config = &dev_priv->wm.config;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
	uint16_t alloc_size, start, cursor_blocks;
	uint16_t minimum[I915_MAX_PLANES];
	uint16_t y_minimum[I915_MAX_PLANES];
	unsigned int total_data_rate;

	skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
		       sizeof(ddb->plane[pipe][PLANE_CURSOR]));
		return;
	}

	cursor_blocks = skl_cursor_allocation(config);
	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	alloc_size -= cursor_blocks;
	alloc->end -= cursor_blocks;

	/* 1. Allocate the minimum required blocks for each active plane */
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane *plane = &intel_plane->base;
		struct drm_framebuffer *fb = plane->state->fb;
		int id = skl_wm_plane_id(intel_plane);

		if (!to_intel_plane_state(plane->state)->visible)
			continue;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		minimum[id] = 8;
		alloc_size -= minimum[id];
		y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
		alloc_size -= y_minimum[id];
	}

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate);

	start = alloc->start;
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane *plane = &intel_plane->base;
		struct drm_plane_state *pstate = intel_plane->base.state;
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;
		int id = skl_wm_plane_id(intel_plane);

		if (!to_intel_plane_state(pstate)->visible)
			continue;
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		ddb->plane[pipe][id].start = start;
		ddb->plane[pipe][id].end = start + plane_blocks;

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
			y_data_rate = skl_plane_relative_data_rate(cstate,
								   pstate,
								   1);
			y_plane_blocks = y_minimum[id];
			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
						  total_data_rate);

			ddb->y_plane[pipe][id].start = start;
			ddb->y_plane[pipe][id].end = start + y_plane_blocks;

			start += y_plane_blocks;
		}
	}
}
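
/*
 * Worked example (illustrative): with 400 spare blocks after the minimums
 * and two visible planes whose data rates split 3:1, the first plane gets
 * its 8-block minimum plus 400 * 3/4 = 300 blocks, the second 8 + 100.
 */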

static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}
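
/*
 * Worked example (illustrative): pixel_rate = 148500 (kHz), cpp = 4,
 * latency = 2us: 2 * 148500 * 4 / 512 = 2320, and
 * DIV_ROUND_UP(2320, 1000) = 3 blocks.
 */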

static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * cpp;

	if (tiling == I915_FORMAT_MOD_Y_TILED ||
	    tiling == I915_FORMAT_MOD_Yf_TILED) {
		plane_bytes_per_line *= 4;
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line /= 4;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
				plane_blocks_per_line;

	return ret;
}
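
/*
 * The *4 then /4 step rounds the Y-tiled blocks-per-line figure to
 * quarter-block granularity: e.g. 3900 bytes/line is
 * DIV_ROUND_UP(3900, 512) = 8 blocks when computed linearly, but
 * DIV_ROUND_UP(15600, 512) / 4 = 31 / 4 = 7 for Y-tiling.
 */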

static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
				       const struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * If ddb allocation of pipes changed, it may require recalculation of
	 * watermarks
	 */
	if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
		return true;

	return false;
}

static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				 struct intel_crtc_state *cstate,
				 struct intel_plane *intel_plane,
				 uint16_t ddb_allocation,
				 int level,
				 uint16_t *out_blocks, /* out */
				 uint8_t *out_lines /* out */)
{
	struct drm_plane *plane = &intel_plane->base;
	struct drm_framebuffer *fb = plane->state->fb;
	struct intel_plane_state *intel_pstate =
					to_intel_plane_state(plane->state);
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t cpp;
	uint32_t width = 0, height = 0;

	if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
		return false;

	width = drm_rect_width(&intel_pstate->src) >> 16;
	height = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(plane->state->rotation))
		swap(width, height);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
				 cpp, latency);
	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 width,
				 cpp,
				 fb->modifier[0],
				 latency);

	plane_bytes_per_line = width * cpp;
	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
		uint32_t min_scanlines = 4;
		uint32_t y_tile_minimum;
		if (intel_rotation_90_or_270(plane->state->rotation)) {
			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
				drm_format_plane_cpp(fb->pixel_format, 1) :
				drm_format_plane_cpp(fb->pixel_format, 0);

			switch (cpp) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
		y_tile_minimum = plane_blocks_per_line * min_scanlines;
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31)
		return false;

	*out_blocks = res_blocks;
	*out_lines = res_lines;

	return true;
}

static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
				 struct skl_ddb_allocation *ddb,
				 struct intel_crtc_state *cstate,
				 int level,
				 struct skl_wm_level *result)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_plane *intel_plane;
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		int i = skl_wm_plane_id(intel_plane);

		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		result->plane_en[i] = skl_compute_plane_wm(dev_priv,
						cstate,
						intel_plane,
						ddb_blocks,
						level,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}
}

static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	if (!cstate->base.active)
		return 0;

	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
		return 0;

	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
			    skl_pipe_pixel_rate(cstate));
}

static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;

	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
		int i = skl_wm_plane_id(intel_plane);

		trans_wm->plane_en[i] = false;
	}
}

static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
				struct skl_ddb_allocation *ddb,
				struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		skl_compute_wm_level(dev_priv, ddb, cstate,
				     level, &pipe_wm->wm[level]);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
}

static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];

		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
			temp |= PLANE_WM_EN;

		r->plane[pipe][PLANE_CURSOR][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
		temp |= PLANE_WM_EN;

	r->plane_trans[pipe][PLANE_CURSOR] = temp;

	r->wm_linetime[pipe] = p_wm->linetime;
}

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if (!new->dirty[pipe])
			continue;

		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);

		for (level = 0; level <= max_level; level++) {
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM(pipe, i, level),
					   new->plane[pipe][i][level]);
			I915_WRITE(CUR_WM(pipe, level),
				   new->plane[pipe][PLANE_CURSOR][level]);
		}
		for (i = 0; i < intel_num_planes(crtc); i++)
			I915_WRITE(PLANE_WM_TRANS(pipe, i),
				   new->plane_trans[pipe][i]);
		I915_WRITE(CUR_WM_TRANS(pipe),
			   new->plane_trans[pipe][PLANE_CURSOR]);

		for (i = 0; i < intel_num_planes(crtc); i++) {
			skl_ddb_entry_write(dev_priv,
					    PLANE_BUF_CFG(pipe, i),
					    &new->ddb.plane[pipe][i]);
			skl_ddb_entry_write(dev_priv,
					    PLANE_NV12_BUF_CFG(pipe, i),
					    &new->ddb.y_plane[pipe][i]);
		}

		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
				    &new->ddb.plane[pipe][PLANE_CURSOR]);
	}
}

/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previous light-up pipe (another way to put it is:
 *     pipes with their new allocation strictly included into their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(dev_priv, pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}

static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = dev_priv->dev;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;
	enum pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained into
	 * the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without this pipe fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
			reallocated[pipe] = true;
		}
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes with more space than before
		 * are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}

static bool skl_update_pipe_wm(struct drm_crtc *crtc,
			       struct skl_ddb_allocation *ddb, /* out */
			       struct skl_pipe_wm *pipe_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);

	skl_allocate_pipe_ddb(cstate, ddb);
	skl_compute_pipe_wm(cstate, ddb, pipe_wm);

	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.active.skl = *pipe_wm;

	return true;
}

static void skl_update_other_pipe_wm(struct drm_device *dev,
				     struct drm_crtc *crtc,
				     struct skl_wm_values *r)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc *this_crtc = to_intel_crtc(crtc);

	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
	 * crtc we are currently computing the new WM values for), other
	 * enabled crtcs will keep the same allocation and we don't need to
	 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
	 * other active pipes need new DDB allocation and WM values.
	 */
	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&r->ddb, &pipe_wm);

		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values to
		 * be different.
		 */
		WARN_ON(!wm_changed);

		skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
		r->dirty[intel_crtc->pipe] = true;
	}
}

static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
{
	watermarks->wm_linetime[pipe] = 0;
	memset(watermarks->plane[pipe], 0,
	       sizeof(uint32_t) * 8 * I915_MAX_PLANES);
	memset(watermarks->plane_trans[pipe],
	       0, sizeof(uint32_t) * I915_MAX_PLANES);
	watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;

	/* Clear ddb entries for pipe */
	memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
	memset(&watermarks->ddb.plane[pipe], 0,
	       sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
	memset(&watermarks->ddb.y_plane[pipe], 0,
	       sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
	memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
	       sizeof(struct skl_ddb_entry));
}

static void skl_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;

	/* Clear all dirty flags */
	memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);

	skl_clear_wm(results, intel_crtc->pipe);

	if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
		return;

	skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
	results->dirty[intel_crtc->pipe] = true;

	skl_update_other_pipe_wm(dev, crtc, results);
	skl_write_wm_values(dev_priv, results);
	skl_flush_wm_values(dev_priv, results);

	/* store the new configuration */
	dev_priv->wm.skl_hw = *results;
}

static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
			active->wm[level].plane_res_b[PLANE_CURSOR] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[PLANE_CURSOR] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
			active->trans_wm.plane_res_b[PLANE_CURSOR] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[PLANE_CURSOR] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}

static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
	enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);

	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++)
			hw->plane[pipe][i][level] =
					I915_READ(PLANE_WM(pipe, i, level));
		hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
	hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));

	if (!intel_crtc->active)
		return;

	hw->dirty[pipe] = true;

	active->linetime = hw->wm_linetime[pipe];

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
							false, i, level);
		}
		temp = hw->plane[pipe][PLANE_CURSOR][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = hw->plane_trans[pipe][i];
		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
	}

	temp = hw->plane_trans[pipe][PLANE_CURSOR];
	skl_pipe_wm_active_state(temp, active, true, true, i, 0);

	intel_crtc->wm.active.skl = *active;
}

void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		skl_pipe_wm_get_hw_state(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
	enum pipe pipe = intel_crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	intel_crtc->wm.active.ilk = *active;
}

#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].primary =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].cursor =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}
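/*
 * Worked example of the split readout above (value made up): a 9-bit
 * primary watermark of 0x115 is held by the hardware as 0x15 in the low
 * DSPFW field plus a 1 in the matching DSPHOWM "HI" bit, so _FW_WM_VLV()
 * first yields 0x15 and the "|= _FW_WM(tmp, ..._HI) << 8" step restores
 * 0x115. The SR plane field is one bit wider, hence its HI bit is
 * shifted in at position 9.
 */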
void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_plane *plane;
	enum pipe pipe;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
			int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
			break;
		}
	}

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_pipe(dev_priv, pipe)
		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
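/*
 * The DDR DVFS probe above is an instance of the generic Punit
 * request/ack handshake. A minimal sketch of that pattern, assuming
 * only the REQ_ACK-style bit layout the code above already relies on
 * (REQ_ACK_BIT and timeout_ms are placeholders, not real definitions):
 *
 *	val = vlv_punit_read(dev_priv, reg);
 *	val |= REQ_ACK_BIT;		set the request, payload untouched
 *	vlv_punit_write(dev_priv, reg, val);
 *	wait_for((vlv_punit_read(dev_priv, reg) & REQ_ACK_BIT) == 0,
 *		 timeout_ms);
 *
 * A missing ack within the timeout is interpreted as "feature disabled
 * in BIOS", not as a failed register write.
 */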
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 * where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
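/*
 * Rough worked example of the "normal" formula above (numbers are
 * illustrative only, not taken from any platform latency table): with a
 * 100 MHz dotclock, 4 bytes per pixel and an assumed 10 us latency, the
 * FIFO must cover 100e6 * 4 * 10e-6 = 4000 bytes while a memory request
 * is outstanding. With a 64-byte FIFO entry that is 4000 / 64 = 62.5
 * entries; rounding up and adding the 2 clock-crossing entries gives a
 * programmed watermark of 65.
 */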
/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}
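/*
 * Worked example of the packing above (softlimit values made up): on a
 * pre-gen9 part with max_freq_softlimit = 0x16 and min_freq_softlimit =
 * 0x0b (50 MHz units), the computed register value is 0x16 << 24 =
 * 0x16000000 while running above the floor, and 0x16000000 |
 * (0x0b << 16) = 0x160b0000 once val reaches the minimum; gen9 uses the
 * same scheme at bit positions 23 and 14.
 */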
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}
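/*
 * Since the RP counters tick in 1280 ns units rather than 1 us, the
 * GT_INTERVAL_FROM_US() conversions above rescale the requested times.
 * For example the 13 ms (13000 us) BETWEEN up-interval works out to
 * roughly 13000 * 1000 / 1280 = 10156 counter ticks on such parts
 * (illustrative arithmetic; the exact divisor is platform dependent).
 */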
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		return;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (!IS_CHERRYVIEW(dev_priv))
			gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* Wake up the media well, as that takes a lot less
	 * power than the Render well. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	valleyview_set_rps(dev_priv->dev, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->mm.busy &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}
void intel_set_rps(struct drm_device *dev, u8 val)
{
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);
}
static void gen9_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
			      onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
			      onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
	else
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
					   ggtt->stolen_reserved_size))) {
		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
					    GEN6_RC_CTL_HW_ENABLE)) &&
	    ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
	     !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
		DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		return 0;
	}

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}
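/*
 * Worked example of the mask handling above: with the kernel parameter
 * set to 7 (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE =
 * 1 | 2 | 4) on hardware where only plain RC6 is valid (mask =
 * INTEL_RC6_ENABLE), the adjustment message is logged and the function
 * returns 7 & 1 = 1.
 */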
int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}
static void gen6_init_rps_frequencies(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t rp_state_cap;
	u32 ddcc_status = 0;
	int ret;

	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq = 0;
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_BROXTON(dev)) {
		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	    IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = sandybridge_pcode_read(dev_priv,
					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					&ddcc_status);
		if (0 == ret)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Store the frequency values in 16.66 MHZ units, which is
		   the natural hardware unit for SKL */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			dev_priv->rps.min_freq_softlimit =
				max_t(int, dev_priv->rps.efficient_freq,
				      intel_freq_opcode(dev_priv, 450));
		else
			dev_priv->rps.min_freq_softlimit =
				dev_priv->rps.min_freq;
	}
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	gen6_init_rps_frequencies(dev);

	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		/*
		 * BIOS could leave the Hw Turbo enabled, so need to explicitly
		 * clear out the Control register just to avoid inconsistency
		 * with debugfs interface, which will show Turbo as enabled
		 * only and that is not expected by the User after adding the
		 * WaGsvDisableTurbo. Apart from this there is no problem even
		 * if the Turbo is left enabled in the Control register, as the
		 * Up/Down interrupts would remain masked.
		 */
		gen9_disable_rps(dev);
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		return;
	}

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		   GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */

	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
	if (IS_SKYLAKE(dev))
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	else
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC_UCODE(dev))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
	/* WaRsUseTimeoutMode */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	} else {
		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);
	}

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
			   (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
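/*
 * Illustration of one table entry from the loop above (values made up):
 * on a gen8-style part with gpu_freq = 20 (50 MHz units, i.e. 1000 MHz)
 * and min_ring_freq = 16, ring_freq = max(16, 20) = 20 and ia_freq is
 * left at 0, so the PCU mailbox payload packs
 * 0 << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
 * 20 << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | 20.
 */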
void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_CORE_RING_FREQ(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev)->eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}
static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}
static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}
static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	mutex_lock(&dev->struct_mutex);

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
	mutex_unlock(&dev->struct_mutex);
}
static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	valleyview_setup_pctx(dev);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	cherryview_setup_pctx(dev);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	/* PUnit validated range is only [RPe, RP0] */
	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
			 dev_priv->rps.idle_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 gtfifodbg, val, rc6_mode = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
			 dev_priv->rps.idle_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
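/*
 * Worked example of the formula above (field values made up): for a
 * PXVFREQ word with div = 12, post = 1 and pre = 1, the result is
 * 12 * 133333 / ((1 << 1) * 1) = 799998; the 133333 constant suggests a
 * 133.33 MHz reference, so treat the unit scale as illustrative here.
 */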
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
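/*
 * Purely illustrative arithmetic for the slope/intercept readout above
 * (register contents made up, the real values come from hardware fuses):
 * with a slope field m = 100, a TR1 reading x = 80 and an intercept
 * b = 50, the reported value is (100 * 80) / 127 - 50 = 62 - 50 = 12.
 */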
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
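
/*
 * Worked example of the PXVID decode above (illustrative values): a raw
 * pxvid of 20 falls in the [8, 31) hole and is clamped to 31, so
 * vd = (31 + 2) * 125 = 4125 and a mobile part reports
 * vm = 4125 - 1125 = 3000; a raw pxvid of 40 is taken as-is, giving
 * vd = (40 + 2) * 125 = 5250. The numbers only demonstrate the arithmetic,
 * not real voltages.
 */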

static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
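
/*
 * The scaling above turns a raw GFXEC count delta into the cached
 * dev_priv->ips.gfx_power value as diff * 1181 / (diffms * 10); for
 * example, 5000 new counts over 50 ms give 5000 * 1181 / 500 = 11810.
 * Note the 1181 multiplier is part of the "magic constants" and the
 * sample numbers are illustrative only.
 */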

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *engine;
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_engine(engine, dev_priv)
		ret |= !list_empty(&engine->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
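
/*
 * Worked example of the P-state weight computed in the loop above
 * (illustrative values): with vid = 100 and freq = 800000 (kHz, so
 * freq / 1000 = 800 MHz), val = 100 * 100 * 800 * 255 / (127*127*900)
 * = 2040000000 / 14516100 ~= 140, which fits in the 8-bit PXW field.
 * The weight therefore scales as V^2 * f, i.e. roughly as dynamic power.
 */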

void intel_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);

	if (!i915.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}

static void gen6_suspend_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	gen6_disable_rps_interrupts(dev);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (INTEL_INFO(dev)->gen >= 9) {
			gen9_disable_rc6(dev);
			gen9_disable_rps(dev);
		} else if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);

		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	gen6_reset_rps_interrupts(dev);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_enable_rc6(dev);
		gen9_enable_rps(dev);
		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			__gen6_update_ring_freq(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;

	gen6_enable_rps_interrupts(dev);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev))
		return;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		mutex_lock(&dev->struct_mutex);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);
	dev_priv->rps.enabled = false;
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_LPT_LP(dev)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void kabylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	uint32_t misccpctl;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:bdw
	 * WaTempDisableDOPClkGating:bdw
	 */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
	/*
	 * Wait at least 100 clocks before re-enabling clock gating. See
	 * the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

static void nop_init_clock_gating(struct drm_device *dev)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
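
/*
 * Note that nothing runs at hook-selection time: the chosen function is
 * merely stored in dev_priv->display.init_clock_gating and is invoked
 * later through intel_init_clock_gating() above, so the platform checks
 * happen once rather than on every clock gating (re)init.
 */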

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);
		dev_priv->display.update_wm = skl_update_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_CHERRYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
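
/*
 * Sketch of how the mailbox pair above is used: both helpers assume
 * rps.hw_lock is already held, and pcode_read passes the request argument
 * in through *val and returns the reply in the same slot. An illustrative
 * read-modify-write sequence (using the existing RC6VIDS mailboxes as an
 * example; adapt the mailbox and locking to the actual caller):
 *
 *	u32 rc6vids = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
 *				   &rc6vids) == 0) {
 *		... modify rc6vids as needed ...
 *		sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS,
 *					rc6vids);
 *	}
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */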

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
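
/*
 * Worked round-trip example for the conversions above (the reference clock
 * value is illustrative, not a real part's): on VLV with
 * rps.gpll_ref_freq = 20000 kHz, intel_freq_opcode() for 320 MHz gives
 * 1000 * 320 / 20000 + 0xb7 = 16 + 0xb7 = 0xc7, and intel_gpu_freq(0xc7)
 * gives 20000 * (0xc7 - 0xb7) / 1000 = 320 MHz again. On gen9 the opcode
 * space is finer grained, hence the extra GEN9_FREQ_SCALER factor applied
 * in opposite directions by the two functions.
 */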

struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req, true))
		gen6_rps_boost(to_i915(req->engine->dev), NULL,
			       req->emitted_jiffies);

	i915_gem_request_unreference__unlocked(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_device *dev,
				       struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_INFO(dev)->gen < 6)
		return;

	if (i915_gem_request_completed(req, true))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(to_i915(dev)->wq, &boost->work);
}
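
/*
 * The boost path above is deliberately asynchronous: the GFP_ATOMIC
 * allocation and the deferral to the driver workqueue suggest callers may
 * be in contexts where sleeping or taking rps locks is not possible. A
 * reference is taken on the request here and dropped again by
 * __intel_rps_boost_work() once the boost decision has been made.
 */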

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
	INIT_LIST_HEAD(&dev_priv->rps.clients);
	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
	atomic_set(&dev_priv->pm.atomic_seq, 0);
}