/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled; as soon as a new workload arises the GPU wakes up automatically
 * as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6 and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
 * one which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

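/*
 * Illustrative note (not part of the original source): these flags form a
 * bitmask, so a hypothetical policy allowing plain RC6 and deep RC6 but not
 * RC6pp would be expressed as
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * and individual states would then be tested with expressions such as
 * (rc6_mask & INTEL_RC6p_ENABLE).
 */
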
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

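/*
 * Illustrative example (not part of the original source): with
 * is_desktop == 0, is_ddr3 == 0, fsb == 667 and mem == 800 the loop above
 * returns the {0, 0, 667, 800, 3443, 33443, 3905, 33905} entry, whose
 * latencies (in ns) feed the display/cursor self-refresh and HPLL-off
 * watermark calculations in pineview_update_wm(). Any combination not
 * listed in the table returns NULL and CxSR stays disabled.
 */
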
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

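/*
 * Illustrative example (not part of the original source): the FIFO start
 * offsets are 9-bit values split across two registers. If the low byte of
 * DSPARB for pipe A is 0x80 and bit 0 of DSPARB2 is 1, then
 * VLV_FIFO_START(dsparb, dsparb2, 0, 0) yields 0x80 | (1 << 8) = 384, so
 * sprite0 starts 384 entries into the 511-entry pipe FIFO and the primary
 * plane owns entries 0..383.
 */
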
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to the plane
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

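/*
 * Worked example (illustrative values, not from the original source):
 * clock_in_khz = 100000 (100 MHz), cpp = 4, latency_ns = 5000 and a
 * 64-byte cacheline give
 *
 *	entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
 *	                   -> DIV_ROUND_UP(2000, 64) = 32 FIFO lines
 *
 * so with fifo_size = 96 and guard_size = 2 the returned watermark level
 * would be 96 - (32 + 2) = 62, subject to the max_wm/default_wm clamping
 * above.
 */
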
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

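/*
 * Illustrative note (not from the original source): the "small buffer"
 * method above sizes the plane watermark from raw bandwidth; e.g. with
 * clock = 148500 kHz, cpp = 4 and display_latency_ns = 5000 it needs
 * (148500 * 4 / 1000) * 5000 / 1000 = 2970 bytes (plus any TLB-miss
 * adjustment) before dividing by the cacheline size, while the "large
 * buffer" method used for the cursor works in whole scanlines
 * (line_count * cursor width * cpp).
 */
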
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

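/*
 * Worked example (illustrative numbers, not from the original source):
 * a 1920-wide, 4 bytes-per-pixel plane with pixel_rate = 148500 kHz and
 * pipe_htotal = 2200, evaluated at the 12 us (latency = 120) PM5 level:
 *
 *	(120 * 148500) / (2200 * 10000) = 0  ->  (0 + 1) full lines
 *	1 * 1920 * 4 = 7680 bytes  ->  DIV_ROUND_UP(7680, 64) = 120
 *
 * i.e. roughly 120 of the 511 FIFO entries must be filled before the
 * deadline; at the 33 us DDR DVFS level the same plane needs 3 lines,
 * i.e. 360 entries.
 */
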
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}

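/*
 * Illustrative example (not from the original source): with the primary
 * plane and one sprite visible, both at 4 bytes per pixel, total_rate = 8
 * and each plane initially gets 511 * 4 / 8 = 255 of the 511 entries.
 * The single leftover entry (511 - 510) is then handed out by the
 * "spread the remainder evenly" pass, so the final split is 256/255 and
 * fifo_left ends up at 0, as the WARN_ON() above checks.
 */
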
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

8cfb3407 1653uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
801bcfff 1654{
fd4daa9c 1655 uint32_t pixel_rate;
801bcfff 1656
8cfb3407 1657 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
801bcfff
PZ
1658
1659 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
1660 * adjust the pixel_rate here. */
1661
8cfb3407 1662 if (pipe_config->pch_pfit.enabled) {
801bcfff 1663 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
8cfb3407
VS
1664 uint32_t pfit_size = pipe_config->pch_pfit.size;
1665
1666 pipe_w = pipe_config->pipe_src_w;
1667 pipe_h = pipe_config->pipe_src_h;
801bcfff 1668
801bcfff
PZ
1669 pfit_w = (pfit_size >> 16) & 0xFFFF;
1670 pfit_h = pfit_size & 0xFFFF;
1671 if (pipe_w < pfit_w)
1672 pipe_w = pfit_w;
1673 if (pipe_h < pfit_h)
1674 pipe_h = pfit_h;
1675
15126882
MR
1676 if (WARN_ON(!pfit_w || !pfit_h))
1677 return pixel_rate;
1678
801bcfff
PZ
1679 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1680 pfit_w * pfit_h);
1681 }
1682
1683 return pixel_rate;
1684}
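/*
 * Illustrative sketch of the panel-fitter adjustment above, with hypothetical
 * numbers (not from any real mode): a 2560x1440 source downscaled by the PCH
 * panel fitter into a 1920x1080 window, with crtc_clock = 148500 kHz.
 *
 *   pipe_w x pipe_h = 2560 x 1440   (the larger source size is kept)
 *   pfit_w x pfit_h = 1920 x 1080
 *   pixel_rate      = 148500 * (2560 * 1440) / (1920 * 1080) = 264000 kHz
 *
 * The effective fetch rate goes up when downscaling because the pipe reads
 * more source pixels per output line; when upscaling, pipe_w/pipe_h are
 * clamped up to the fitter window and the rate is left unchanged.
 */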
1685
37126462 1686/* latency must be in 0.1us units. */
ac484963 1687static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
801bcfff
PZ
1688{
1689 uint64_t ret;
1690
3312ba65
VS
1691 if (WARN(latency == 0, "Latency value missing\n"))
1692 return UINT_MAX;
1693
ac484963 1694 ret = (uint64_t) pixel_rate * cpp * latency;
801bcfff
PZ
1695 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1696
1697 return ret;
1698}
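/*
 * Worked example for method 1 (hypothetical 1080p numbers, not from a
 * latency table): pixel_rate = 148500 kHz, cpp = 4, latency = 7 (0.7 us).
 *
 *   148500 * 4 * 7                        = 4158000
 *   DIV_ROUND_UP_ULL(4158000, 640000) + 2 = 7 + 2 = 9
 *
 * i.e. ~416 bytes drain from the FIFO while memory wakes up, which is 7
 * 64-byte chunks rounded up, plus a guard of 2.
 */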
1699
37126462 1700/* latency must be in 0.1us units. */
23297044 1701static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
ac484963 1702 uint32_t horiz_pixels, uint8_t cpp,
801bcfff
PZ
1703 uint32_t latency)
1704{
1705 uint32_t ret;
1706
3312ba65
VS
1707 if (WARN(latency == 0, "Latency value missing\n"))
1708 return UINT_MAX;
15126882
MR
1709 if (WARN_ON(!pipe_htotal))
1710 return UINT_MAX;
3312ba65 1711
801bcfff 1712 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ac484963 1713 ret = (ret + 1) * horiz_pixels * cpp;
801bcfff
PZ
1714 ret = DIV_ROUND_UP(ret, 64) + 2;
1715 return ret;
1716}
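/*
 * Worked example for method 2 (hypothetical numbers): pixel_rate = 148500 kHz,
 * pipe_htotal = 2200, horiz_pixels = 1920, cpp = 4, latency = 450 (45 us).
 *
 *   lines  = (450 * 148500) / (2200 * 10000) = 3   (full lines in 45 us)
 *   bytes  = (3 + 1) * 1920 * 4              = 30720
 *   result = DIV_ROUND_UP(30720, 64) + 2     = 480 + 2 = 482
 *
 * Method 2 rounds the latency up to whole scanlines, so it tends to dominate
 * method 1 at the large latencies used for the deeper LP watermarks.
 */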
1717
23297044 1718static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
ac484963 1719 uint8_t cpp)
cca32e9a 1720{
15126882
MR
1721 /*
1722 * Neither of these should be possible since this function shouldn't be
1723 * called if the CRTC is off or the plane is invisible. But let's be
1724 * extra paranoid to avoid a potential divide-by-zero if we screw up
1725 * elsewhere in the driver.
1726 */
ac484963 1727 if (WARN_ON(!cpp))
15126882
MR
1728 return 0;
1729 if (WARN_ON(!horiz_pixels))
1730 return 0;
1731
ac484963 1732 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
cca32e9a
PZ
1733}
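/*
 * Worked FBC example (hypothetical values): with pri_val = 122,
 * horiz_pixels = 1920 and cpp = 4,
 *
 *   DIV_ROUND_UP(122 * 64, 1920 * 4) + 2 = DIV_ROUND_UP(7808, 7680) + 2 = 4
 *
 * i.e. the primary watermark (in 64-byte units) is converted into whole
 * 7680-byte lines, plus a guard of 2.
 */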
1734
820c1980 1735struct ilk_wm_maximums {
cca32e9a
PZ
1736 uint16_t pri;
1737 uint16_t spr;
1738 uint16_t cur;
1739 uint16_t fbc;
1740};
1741
37126462
VS
1742/*
1743 * For both WM_PIPE and WM_LP.
1744 * mem_value must be in 0.1us units.
1745 */
7221fc33 1746static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
43d59eda 1747 const struct intel_plane_state *pstate,
cca32e9a
PZ
1748 uint32_t mem_value,
1749 bool is_lp)
801bcfff 1750{
ac484963
VS
1751 int cpp = pstate->base.fb ?
1752 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
cca32e9a
PZ
1753 uint32_t method1, method2;
1754
7221fc33 1755 if (!cstate->base.active || !pstate->visible)
801bcfff
PZ
1756 return 0;
1757
ac484963 1758 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
cca32e9a
PZ
1759
1760 if (!is_lp)
1761 return method1;
1762
7221fc33
MR
1763 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1764 cstate->base.adjusted_mode.crtc_htotal,
43d59eda 1765 drm_rect_width(&pstate->dst),
ac484963 1766 cpp, mem_value);
cca32e9a
PZ
1767
1768 return min(method1, method2);
801bcfff
PZ
1769}
1770
37126462
VS
1771/*
1772 * For both WM_PIPE and WM_LP.
1773 * mem_value must be in 0.1us units.
1774 */
7221fc33 1775static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
43d59eda 1776 const struct intel_plane_state *pstate,
801bcfff
PZ
1777 uint32_t mem_value)
1778{
ac484963
VS
1779 int cpp = pstate->base.fb ?
1780 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
801bcfff
PZ
1781 uint32_t method1, method2;
1782
7221fc33 1783 if (!cstate->base.active || !pstate->visible)
801bcfff
PZ
1784 return 0;
1785
ac484963 1786 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
7221fc33
MR
1787 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1788 cstate->base.adjusted_mode.crtc_htotal,
43d59eda 1789 drm_rect_width(&pstate->dst),
ac484963 1790 cpp, mem_value);
801bcfff
PZ
1791 return min(method1, method2);
1792}
1793
37126462
VS
1794/*
1795 * For both WM_PIPE and WM_LP.
1796 * mem_value must be in 0.1us units.
1797 */
7221fc33 1798static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
43d59eda 1799 const struct intel_plane_state *pstate,
801bcfff
PZ
1800 uint32_t mem_value)
1801{
b2435692
MR
1802 /*
1803 * We treat the cursor plane as always-on for the purposes of watermark
1804 * calculation. Until we have two-stage watermark programming merged,
1805 * this is necessary to avoid flickering.
1806 */
1807 int cpp = 4;
1808 int width = pstate->visible ? pstate->base.crtc_w : 64;
43d59eda 1809
b2435692 1810 if (!cstate->base.active)
801bcfff
PZ
1811 return 0;
1812
7221fc33
MR
1813 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1814 cstate->base.adjusted_mode.crtc_htotal,
b2435692 1815 width, cpp, mem_value);
801bcfff
PZ
1816}
1817
cca32e9a 1818/* Only for WM_LP. */
7221fc33 1819static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
43d59eda 1820 const struct intel_plane_state *pstate,
1fda9882 1821 uint32_t pri_val)
cca32e9a 1822{
ac484963
VS
1823 int cpp = pstate->base.fb ?
1824 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
43d59eda 1825
7221fc33 1826 if (!cstate->base.active || !pstate->visible)
cca32e9a
PZ
1827 return 0;
1828
ac484963 1829 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
cca32e9a
PZ
1830}
1831
158ae64f
VS
1832static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1833{
416f4727
VS
1834 if (INTEL_INFO(dev)->gen >= 8)
1835 return 3072;
1836 else if (INTEL_INFO(dev)->gen >= 7)
158ae64f
VS
1837 return 768;
1838 else
1839 return 512;
1840}
1841
4e975081
VS
1842static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
1843 int level, bool is_sprite)
1844{
1845 if (INTEL_INFO(dev)->gen >= 8)
1846 /* BDW primary/sprite plane watermarks */
1847 return level == 0 ? 255 : 2047;
1848 else if (INTEL_INFO(dev)->gen >= 7)
1849 /* IVB/HSW primary/sprite plane watermarks */
1850 return level == 0 ? 127 : 1023;
1851 else if (!is_sprite)
1852 /* ILK/SNB primary plane watermarks */
1853 return level == 0 ? 127 : 511;
1854 else
1855 /* ILK/SNB sprite plane watermarks */
1856 return level == 0 ? 63 : 255;
1857}
1858
1859static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
1860 int level)
1861{
1862 if (INTEL_INFO(dev)->gen >= 7)
1863 return level == 0 ? 63 : 255;
1864 else
1865 return level == 0 ? 31 : 63;
1866}
1867
1868static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
1869{
1870 if (INTEL_INFO(dev)->gen >= 8)
1871 return 31;
1872 else
1873 return 15;
1874}
1875
158ae64f
VS
1876/* Calculate the maximum primary/sprite plane watermark */
1877static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1878 int level,
240264f4 1879 const struct intel_wm_config *config,
158ae64f
VS
1880 enum intel_ddb_partitioning ddb_partitioning,
1881 bool is_sprite)
1882{
1883 unsigned int fifo_size = ilk_display_fifo_size(dev);
158ae64f
VS
1884
1885 /* if sprites aren't enabled, sprites get nothing */
240264f4 1886 if (is_sprite && !config->sprites_enabled)
158ae64f
VS
1887 return 0;
1888
1889 /* HSW allows LP1+ watermarks even with multiple pipes */
240264f4 1890 if (level == 0 || config->num_pipes_active > 1) {
158ae64f
VS
1891 fifo_size /= INTEL_INFO(dev)->num_pipes;
1892
1893 /*
1894			 * For some reason the non-self-refresh
1895			 * FIFO size is only half of the self-refresh
1896			 * FIFO size on ILK/SNB.
1897 */
1898 if (INTEL_INFO(dev)->gen <= 6)
1899 fifo_size /= 2;
1900 }
1901
240264f4 1902 if (config->sprites_enabled) {
158ae64f
VS
1903 /* level 0 is always calculated with 1:1 split */
1904 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
1905 if (is_sprite)
1906 fifo_size *= 5;
1907 fifo_size /= 6;
1908 } else {
1909 fifo_size /= 2;
1910 }
1911 }
1912
1913 /* clamp to max that the registers can hold */
4e975081 1914 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
158ae64f
VS
1915}
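/*
 * Illustrative split for the helper above (hypothetical config): HSW,
 * level > 0, one active pipe, sprites enabled, 5/6 DDB partitioning.
 * ilk_display_fifo_size() returns 768 on gen7, and the single-pipe case
 * skips the per-pipe division, so:
 *
 *   primary: 768 / 6     = 128 blocks
 *   sprite : 768 * 5 / 6 = 640 blocks
 *
 * Both values are below the gen7 LP register limit of 1023, so no clamping
 * happens in this example.
 */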
1916
1917/* Calculate the maximum cursor plane watermark */
1918static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
240264f4
VS
1919 int level,
1920 const struct intel_wm_config *config)
158ae64f
VS
1921{
1922 /* HSW LP1+ watermarks w/ multiple pipes */
240264f4 1923 if (level > 0 && config->num_pipes_active > 1)
158ae64f
VS
1924 return 64;
1925
1926 /* otherwise just report max that registers can hold */
4e975081 1927 return ilk_cursor_wm_reg_max(dev, level);
158ae64f
VS
1928}
1929
d34ff9c6 1930static void ilk_compute_wm_maximums(const struct drm_device *dev,
34982fe1
VS
1931 int level,
1932 const struct intel_wm_config *config,
1933 enum intel_ddb_partitioning ddb_partitioning,
820c1980 1934 struct ilk_wm_maximums *max)
158ae64f 1935{
240264f4
VS
1936 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1937 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1938 max->cur = ilk_cursor_wm_max(dev, level, config);
4e975081 1939 max->fbc = ilk_fbc_wm_reg_max(dev);
158ae64f
VS
1940}
1941
a3cb4048
VS
1942static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
1943 int level,
1944 struct ilk_wm_maximums *max)
1945{
1946 max->pri = ilk_plane_wm_reg_max(dev, level, false);
1947 max->spr = ilk_plane_wm_reg_max(dev, level, true);
1948 max->cur = ilk_cursor_wm_reg_max(dev, level);
1949 max->fbc = ilk_fbc_wm_reg_max(dev);
1950}
1951
d9395655 1952static bool ilk_validate_wm_level(int level,
820c1980 1953 const struct ilk_wm_maximums *max,
d9395655 1954 struct intel_wm_level *result)
a9786a11
VS
1955{
1956 bool ret;
1957
1958 /* already determined to be invalid? */
1959 if (!result->enable)
1960 return false;
1961
1962 result->enable = result->pri_val <= max->pri &&
1963 result->spr_val <= max->spr &&
1964 result->cur_val <= max->cur;
1965
1966 ret = result->enable;
1967
1968 /*
1969 * HACK until we can pre-compute everything,
1970 * and thus fail gracefully if LP0 watermarks
1971 * are exceeded...
1972 */
1973 if (level == 0 && !result->enable) {
1974 if (result->pri_val > max->pri)
1975 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
1976 level, result->pri_val, max->pri);
1977 if (result->spr_val > max->spr)
1978 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
1979 level, result->spr_val, max->spr);
1980 if (result->cur_val > max->cur)
1981 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
1982 level, result->cur_val, max->cur);
1983
1984 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
1985 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
1986 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
1987 result->enable = true;
1988 }
1989
a9786a11
VS
1990 return ret;
1991}
1992
d34ff9c6 1993static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
43d59eda 1994 const struct intel_crtc *intel_crtc,
6f5ddd17 1995 int level,
7221fc33 1996 struct intel_crtc_state *cstate,
86c8bbbe
MR
1997 struct intel_plane_state *pristate,
1998 struct intel_plane_state *sprstate,
1999 struct intel_plane_state *curstate,
1fd527cc 2000 struct intel_wm_level *result)
6f5ddd17
VS
2001{
2002 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2003 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2004 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2005
2006 /* WM1+ latency values stored in 0.5us units */
2007 if (level > 0) {
2008 pri_latency *= 5;
2009 spr_latency *= 5;
2010 cur_latency *= 5;
2011 }
2012
86c8bbbe
MR
2013 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2014 pri_latency, level);
2015 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2016 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2017 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
6f5ddd17
VS
2018 result->enable = true;
2019}
2020
801bcfff 2021static uint32_t
ee91a159
MR
2022hsw_compute_linetime_wm(struct drm_device *dev,
2023 struct intel_crtc_state *cstate)
1f8eeabf
ED
2024{
2025 struct drm_i915_private *dev_priv = dev->dev_private;
ee91a159
MR
2026 const struct drm_display_mode *adjusted_mode =
2027 &cstate->base.adjusted_mode;
85a02deb 2028 u32 linetime, ips_linetime;
1f8eeabf 2029
ee91a159
MR
2030 if (!cstate->base.active)
2031 return 0;
2032 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2033 return 0;
2034 if (WARN_ON(dev_priv->cdclk_freq == 0))
801bcfff 2035 return 0;
1011d8c4 2036
1f8eeabf
ED
2037	/* The watermarks are computed based on how long it takes to fill a
2038	 * single row at the given clock rate, multiplied by 8.
2039	 */
124abe07
VS
2040 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2041 adjusted_mode->crtc_clock);
2042 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
05024da3 2043 dev_priv->cdclk_freq);
1f8eeabf 2044
801bcfff
PZ
2045 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2046 PIPE_WM_LINETIME_TIME(linetime);
1f8eeabf
ED
2047}
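/*
 * Worked linetime example (hypothetical 1080p timings): crtc_htotal = 2200,
 * crtc_clock = 148500 kHz, cdclk_freq = 450000 kHz.
 *
 *   linetime     = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *   ips_linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 450000) = 39
 *
 * i.e. one scanline takes ~14.8 us, and the register fields hold that time
 * in 1/8 us units, once against the pixel clock and once against the CD clock.
 */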
2048
2af30a5c 2049static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
12b134df
VS
2050{
2051 struct drm_i915_private *dev_priv = dev->dev_private;
2052
2af30a5c
PB
2053 if (IS_GEN9(dev)) {
2054 uint32_t val;
4f947386 2055 int ret, i;
367294be 2056 int level, max_level = ilk_wm_max_level(dev);
2af30a5c
PB
2057
2058 /* read the first set of memory latencies[0:3] */
2059 val = 0; /* data0 to be programmed to 0 for first set */
2060 mutex_lock(&dev_priv->rps.hw_lock);
2061 ret = sandybridge_pcode_read(dev_priv,
2062 GEN9_PCODE_READ_MEM_LATENCY,
2063 &val);
2064 mutex_unlock(&dev_priv->rps.hw_lock);
2065
2066 if (ret) {
2067 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2068 return;
2069 }
2070
2071 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2072 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2073 GEN9_MEM_LATENCY_LEVEL_MASK;
2074 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2075 GEN9_MEM_LATENCY_LEVEL_MASK;
2076 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2077 GEN9_MEM_LATENCY_LEVEL_MASK;
2078
2079 /* read the second set of memory latencies[4:7] */
2080 val = 1; /* data0 to be programmed to 1 for second set */
2081 mutex_lock(&dev_priv->rps.hw_lock);
2082 ret = sandybridge_pcode_read(dev_priv,
2083 GEN9_PCODE_READ_MEM_LATENCY,
2084 &val);
2085 mutex_unlock(&dev_priv->rps.hw_lock);
2086 if (ret) {
2087 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2088 return;
2089 }
2090
2091 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2092 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2093 GEN9_MEM_LATENCY_LEVEL_MASK;
2094 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2095 GEN9_MEM_LATENCY_LEVEL_MASK;
2096 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2097 GEN9_MEM_LATENCY_LEVEL_MASK;
2098
367294be 2099 /*
6f97235b
DL
2100 * WaWmMemoryReadLatency:skl
2101 *
367294be
VK
2102 * punit doesn't take into account the read latency so we need
2103 * to add 2us to the various latency levels we retrieve from
2104 * the punit.
2105	 *   - WM0 is a bit special in that it's the only level that
2106	 *     can't be disabled if we want to have display working, so
2107	 *     we always add 2us there.
2108	 *   - For levels >= 1, the punit returns 0us latency when they are
2109	 *     disabled, so we respect that and don't add 2us in that case.
4f947386
VK
2110 *
2111 * Additionally, if a level n (n > 1) has a 0us latency, all
2112 * levels m (m >= n) need to be disabled. We make sure to
2113 * sanitize the values out of the punit to satisfy this
2114 * requirement.
367294be
VK
2115 */
2116 wm[0] += 2;
2117 for (level = 1; level <= max_level; level++)
2118 if (wm[level] != 0)
2119 wm[level] += 2;
4f947386
VK
2120 else {
2121 for (i = level + 1; i <= max_level; i++)
2122 wm[i] = 0;
367294be 2123
4f947386
VK
2124 break;
2125 }
2af30a5c 2126 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
12b134df
VS
2127 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2128
2129 wm[0] = (sskpd >> 56) & 0xFF;
2130 if (wm[0] == 0)
2131 wm[0] = sskpd & 0xF;
e5d5019e
VS
2132 wm[1] = (sskpd >> 4) & 0xFF;
2133 wm[2] = (sskpd >> 12) & 0xFF;
2134 wm[3] = (sskpd >> 20) & 0x1FF;
2135 wm[4] = (sskpd >> 32) & 0x1FF;
63cf9a13
VS
2136 } else if (INTEL_INFO(dev)->gen >= 6) {
2137 uint32_t sskpd = I915_READ(MCH_SSKPD);
2138
2139 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2140 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2141 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2142 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
3a88d0ac
VS
2143 } else if (INTEL_INFO(dev)->gen >= 5) {
2144 uint32_t mltr = I915_READ(MLTR_ILK);
2145
2146 /* ILK primary LP0 latency is 700 ns */
2147 wm[0] = 7;
2148 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2149 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
12b134df
VS
2150 }
2151}
2152
53615a5e
VS
2153static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2154{
2155 /* ILK sprite LP0 latency is 1300 ns */
2156 if (INTEL_INFO(dev)->gen == 5)
2157 wm[0] = 13;
2158}
2159
2160static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2161{
2162 /* ILK cursor LP0 latency is 1300 ns */
2163 if (INTEL_INFO(dev)->gen == 5)
2164 wm[0] = 13;
2165
2166 /* WaDoubleCursorLP3Latency:ivb */
2167 if (IS_IVYBRIDGE(dev))
2168 wm[3] *= 2;
2169}
2170
546c81fd 2171int ilk_wm_max_level(const struct drm_device *dev)
26ec971e 2172{
26ec971e 2173 /* how many WM levels are we expecting */
b6e742f6 2174 if (INTEL_INFO(dev)->gen >= 9)
2af30a5c
PB
2175 return 7;
2176 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ad0d6dc4 2177 return 4;
26ec971e 2178 else if (INTEL_INFO(dev)->gen >= 6)
ad0d6dc4 2179 return 3;
26ec971e 2180 else
ad0d6dc4
VS
2181 return 2;
2182}
7526ed79 2183
ad0d6dc4
VS
2184static void intel_print_wm_latency(struct drm_device *dev,
2185 const char *name,
2af30a5c 2186 const uint16_t wm[8])
ad0d6dc4
VS
2187{
2188 int level, max_level = ilk_wm_max_level(dev);
26ec971e
VS
2189
2190 for (level = 0; level <= max_level; level++) {
2191 unsigned int latency = wm[level];
2192
2193 if (latency == 0) {
2194 DRM_ERROR("%s WM%d latency not provided\n",
2195 name, level);
2196 continue;
2197 }
2198
2af30a5c
PB
2199 /*
2200 * - latencies are in us on gen9.
2201 * - before then, WM1+ latency values are in 0.5us units
2202 */
2203 if (IS_GEN9(dev))
2204 latency *= 10;
2205 else if (level > 0)
26ec971e
VS
2206 latency *= 5;
2207
2208 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2209 name, level, wm[level],
2210 latency / 10, latency % 10);
2211 }
2212}
2213
e95a2f75
VS
2214static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2215 uint16_t wm[5], uint16_t min)
2216{
2217 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2218
2219 if (wm[0] >= min)
2220 return false;
2221
2222 wm[0] = max(wm[0], min);
2223 for (level = 1; level <= max_level; level++)
2224 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2225
2226 return true;
2227}
2228
2229static void snb_wm_latency_quirk(struct drm_device *dev)
2230{
2231 struct drm_i915_private *dev_priv = dev->dev_private;
2232 bool changed;
2233
2234 /*
2235 * The BIOS provided WM memory latency values are often
2236 * inadequate for high resolution displays. Adjust them.
2237 */
2238 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2239 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2240 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2241
2242 if (!changed)
2243 return;
2244
2245 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2246 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2247 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2248 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2249}
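/*
 * Illustrative effect of the quirk above (hypothetical BIOS values): if the
 * BIOS reported pri_latency = { 5, 2, 6, 12 }, i.e. WM0 = 0.5 us with WM1+
 * in 0.5 us units, then ilk_increase_wm_latency(..., 12) produces
 *
 *   WM0:  max(5, 12)                        = 12 (1.2 us)
 *   WM1+: max(wm, DIV_ROUND_UP(12, 5) = 3)  = at least 3 (1.5 us)
 *
 * so every level ends up covering at least ~1.2 us of memory latency.
 */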
2250
fa50ad61 2251static void ilk_setup_wm_latency(struct drm_device *dev)
53615a5e
VS
2252{
2253 struct drm_i915_private *dev_priv = dev->dev_private;
2254
2255 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2256
2257 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2258 sizeof(dev_priv->wm.pri_latency));
2259 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2260 sizeof(dev_priv->wm.pri_latency));
2261
2262 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2263 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
26ec971e
VS
2264
2265 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2266 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2267 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
e95a2f75
VS
2268
2269 if (IS_GEN6(dev))
2270 snb_wm_latency_quirk(dev);
53615a5e
VS
2271}
2272
2af30a5c
PB
2273static void skl_setup_wm_latency(struct drm_device *dev)
2274{
2275 struct drm_i915_private *dev_priv = dev->dev_private;
2276
2277 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2278 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2279}
2280
0b2ae6d7 2281/* Compute new watermarks for the pipe */
86c8bbbe
MR
2282static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
2283 struct drm_atomic_state *state)
0b2ae6d7 2284{
86c8bbbe
MR
2285 struct intel_pipe_wm *pipe_wm;
2286 struct drm_device *dev = intel_crtc->base.dev;
d34ff9c6 2287 const struct drm_i915_private *dev_priv = dev->dev_private;
86c8bbbe 2288 struct intel_crtc_state *cstate = NULL;
43d59eda 2289 struct intel_plane *intel_plane;
86c8bbbe
MR
2290 struct drm_plane_state *ps;
2291 struct intel_plane_state *pristate = NULL;
43d59eda 2292 struct intel_plane_state *sprstate = NULL;
86c8bbbe 2293 struct intel_plane_state *curstate = NULL;
0b2ae6d7 2294 int level, max_level = ilk_wm_max_level(dev);
bf220452
MR
2295 /* LP0 watermark maximums depend on this pipe alone */
2296 struct intel_wm_config config = {
2297 .num_pipes_active = 1,
2298 };
820c1980 2299 struct ilk_wm_maximums max;
0b2ae6d7 2300
86c8bbbe
MR
2301 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
2302 if (IS_ERR(cstate))
2303 return PTR_ERR(cstate);
2304
2305 pipe_wm = &cstate->wm.optimal.ilk;
f1ecaf8f 2306 memset(pipe_wm, 0, sizeof(*pipe_wm));
86c8bbbe 2307
43d59eda 2308 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
86c8bbbe
MR
2309 ps = drm_atomic_get_plane_state(state,
2310 &intel_plane->base);
2311 if (IS_ERR(ps))
2312 return PTR_ERR(ps);
2313
2314 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2315 pristate = to_intel_plane_state(ps);
2316 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2317 sprstate = to_intel_plane_state(ps);
2318 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2319 curstate = to_intel_plane_state(ps);
43d59eda
MR
2320 }
2321
bf220452
MR
2322 config.sprites_enabled = sprstate->visible;
2323 config.sprites_scaled = sprstate->visible &&
43d59eda
MR
2324 (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
2325 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
2326
bf220452
MR
2327 pipe_wm->pipe_enabled = cstate->base.active;
2328 pipe_wm->sprites_enabled = config.sprites_enabled;
2329 pipe_wm->sprites_scaled = config.sprites_scaled;
2330
7b39a0b7 2331 /* ILK/SNB: LP2+ watermarks only w/o sprites */
43d59eda 2332 if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
7b39a0b7
VS
2333 max_level = 1;
2334
2335 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
bf220452 2336 if (config.sprites_scaled)
7b39a0b7
VS
2337 max_level = 0;
2338
86c8bbbe
MR
2339 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2340 pristate, sprstate, curstate, &pipe_wm->wm[0]);
0b2ae6d7 2341
a42a5719 2342 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ee91a159 2343 pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
0b2ae6d7 2344
bf220452
MR
2345 /* LP0 watermarks always use 1/2 DDB partitioning */
2346 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2347
2348 /* At least LP0 must be valid */
2349 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2350 return -EINVAL;
a3cb4048
VS
2351
2352 ilk_compute_wm_reg_maximums(dev, 1, &max);
2353
2354 for (level = 1; level <= max_level; level++) {
2355 struct intel_wm_level wm = {};
2356
86c8bbbe
MR
2357 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2358 pristate, sprstate, curstate, &wm);
a3cb4048
VS
2359
2360 /*
2361 * Disable any watermark level that exceeds the
2362 * register maximums since such watermarks are
2363 * always invalid.
2364 */
2365 if (!ilk_validate_wm_level(level, &max, &wm))
2366 break;
2367
2368 pipe_wm->wm[level] = wm;
2369 }
2370
86c8bbbe 2371 return 0;
0b2ae6d7
VS
2372}
2373
2374/*
2375 * Merge the watermarks from all active pipes for a specific level.
2376 */
2377static void ilk_merge_wm_level(struct drm_device *dev,
2378 int level,
2379 struct intel_wm_level *ret_wm)
2380{
2381 const struct intel_crtc *intel_crtc;
2382
d52fea5b
VS
2383 ret_wm->enable = true;
2384
d3fcc808 2385 for_each_intel_crtc(dev, intel_crtc) {
bf220452
MR
2386 const struct intel_crtc_state *cstate =
2387 to_intel_crtc_state(intel_crtc->base.state);
2388 const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
fe392efd
VS
2389 const struct intel_wm_level *wm = &active->wm[level];
2390
2391 if (!active->pipe_enabled)
2392 continue;
0b2ae6d7 2393
d52fea5b
VS
2394 /*
2395 * The watermark values may have been used in the past,
2396 * so we must maintain them in the registers for some
2397 * time even if the level is now disabled.
2398 */
0b2ae6d7 2399 if (!wm->enable)
d52fea5b 2400 ret_wm->enable = false;
0b2ae6d7
VS
2401
2402 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2403 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2404 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2405 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2406 }
0b2ae6d7
VS
2407}
2408
2409/*
2410 * Merge all low power watermarks for all active pipes.
2411 */
2412static void ilk_wm_merge(struct drm_device *dev,
0ba22e26 2413 const struct intel_wm_config *config,
820c1980 2414 const struct ilk_wm_maximums *max,
0b2ae6d7
VS
2415 struct intel_pipe_wm *merged)
2416{
7733b49b 2417 struct drm_i915_private *dev_priv = dev->dev_private;
0b2ae6d7 2418 int level, max_level = ilk_wm_max_level(dev);
d52fea5b 2419 int last_enabled_level = max_level;
0b2ae6d7 2420
0ba22e26
VS
2421 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2422 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2423 config->num_pipes_active > 1)
2424 return;
2425
6c8b6c28
VS
2426 /* ILK: FBC WM must be disabled always */
2427 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
0b2ae6d7
VS
2428
2429 /* merge each WM1+ level */
2430 for (level = 1; level <= max_level; level++) {
2431 struct intel_wm_level *wm = &merged->wm[level];
2432
2433 ilk_merge_wm_level(dev, level, wm);
2434
d52fea5b
VS
2435 if (level > last_enabled_level)
2436 wm->enable = false;
2437 else if (!ilk_validate_wm_level(level, max, wm))
2438 /* make sure all following levels get disabled */
2439 last_enabled_level = level - 1;
0b2ae6d7
VS
2440
2441 /*
2442 * The spec says it is preferred to disable
2443 * FBC WMs instead of disabling a WM level.
2444 */
2445 if (wm->fbc_val > max->fbc) {
d52fea5b
VS
2446 if (wm->enable)
2447 merged->fbc_wm_enabled = false;
0b2ae6d7
VS
2448 wm->fbc_val = 0;
2449 }
2450 }
6c8b6c28
VS
2451
2452 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2453 /*
2454 * FIXME this is racy. FBC might get enabled later.
2455 * What we should check here is whether FBC can be
2456 * enabled sometime later.
2457 */
7733b49b 2458 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
0e631adc 2459 intel_fbc_is_active(dev_priv)) {
6c8b6c28
VS
2460 for (level = 2; level <= max_level; level++) {
2461 struct intel_wm_level *wm = &merged->wm[level];
2462
2463 wm->enable = false;
2464 }
2465 }
0b2ae6d7
VS
2466}
2467
b380ca3c
VS
2468static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2469{
2470 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2471 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2472}
2473
a68d68ee
VS
2474/* The value we need to program into the WM_LPx latency field */
2475static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2476{
2477 struct drm_i915_private *dev_priv = dev->dev_private;
2478
a42a5719 2479 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
a68d68ee
VS
2480 return 2 * level;
2481 else
2482 return dev_priv->wm.pri_latency[level];
2483}
2484
820c1980 2485static void ilk_compute_wm_results(struct drm_device *dev,
0362c781 2486 const struct intel_pipe_wm *merged,
609cedef 2487 enum intel_ddb_partitioning partitioning,
820c1980 2488 struct ilk_wm_values *results)
801bcfff 2489{
0b2ae6d7
VS
2490 struct intel_crtc *intel_crtc;
2491 int level, wm_lp;
cca32e9a 2492
0362c781 2493 results->enable_fbc_wm = merged->fbc_wm_enabled;
609cedef 2494 results->partitioning = partitioning;
cca32e9a 2495
0b2ae6d7 2496 /* LP1+ register values */
cca32e9a 2497 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
1fd527cc 2498 const struct intel_wm_level *r;
801bcfff 2499
b380ca3c 2500 level = ilk_wm_lp_to_level(wm_lp, merged);
0b2ae6d7 2501
0362c781 2502 r = &merged->wm[level];
cca32e9a 2503
d52fea5b
VS
2504 /*
2505 * Maintain the watermark values even if the level is
2506 * disabled. Doing otherwise could cause underruns.
2507 */
2508 results->wm_lp[wm_lp - 1] =
a68d68ee 2509 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
416f4727
VS
2510 (r->pri_val << WM1_LP_SR_SHIFT) |
2511 r->cur_val;
2512
d52fea5b
VS
2513 if (r->enable)
2514 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2515
416f4727
VS
2516 if (INTEL_INFO(dev)->gen >= 8)
2517 results->wm_lp[wm_lp - 1] |=
2518 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2519 else
2520 results->wm_lp[wm_lp - 1] |=
2521 r->fbc_val << WM1_LP_FBC_SHIFT;
2522
d52fea5b
VS
2523 /*
2524 * Always set WM1S_LP_EN when spr_val != 0, even if the
2525 * level is disabled. Doing otherwise could cause underruns.
2526 */
6cef2b8a
VS
2527 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2528 WARN_ON(wm_lp != 1);
2529 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2530 } else
2531 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
cca32e9a 2532 }
801bcfff 2533
0b2ae6d7 2534 /* LP0 register values */
d3fcc808 2535 for_each_intel_crtc(dev, intel_crtc) {
bf220452
MR
2536 const struct intel_crtc_state *cstate =
2537 to_intel_crtc_state(intel_crtc->base.state);
0b2ae6d7 2538 enum pipe pipe = intel_crtc->pipe;
bf220452 2539 const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
0b2ae6d7
VS
2540
2541 if (WARN_ON(!r->enable))
2542 continue;
2543
bf220452 2544 results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
1011d8c4 2545
0b2ae6d7
VS
2546 results->wm_pipe[pipe] =
2547 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2548 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2549 r->cur_val;
801bcfff
PZ
2550 }
2551}
2552
861f3389
PZ
2553/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2554 * case both are at the same level. Prefer r1 in case they're the same. */
820c1980 2555static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
198a1e9b
VS
2556 struct intel_pipe_wm *r1,
2557 struct intel_pipe_wm *r2)
861f3389 2558{
198a1e9b
VS
2559 int level, max_level = ilk_wm_max_level(dev);
2560 int level1 = 0, level2 = 0;
861f3389 2561
198a1e9b
VS
2562 for (level = 1; level <= max_level; level++) {
2563 if (r1->wm[level].enable)
2564 level1 = level;
2565 if (r2->wm[level].enable)
2566 level2 = level;
861f3389
PZ
2567 }
2568
198a1e9b
VS
2569 if (level1 == level2) {
2570 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
861f3389
PZ
2571 return r2;
2572 else
2573 return r1;
198a1e9b 2574 } else if (level1 > level2) {
861f3389
PZ
2575 return r1;
2576 } else {
2577 return r2;
2578 }
2579}
2580
49a687c4
VS
2581/* dirty bits used to track which watermarks need changes */
2582#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2583#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2584#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2585#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2586#define WM_DIRTY_FBC (1 << 24)
2587#define WM_DIRTY_DDB (1 << 25)
2588
055e393f 2589static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
820c1980
ID
2590 const struct ilk_wm_values *old,
2591 const struct ilk_wm_values *new)
49a687c4
VS
2592{
2593 unsigned int dirty = 0;
2594 enum pipe pipe;
2595 int wm_lp;
2596
055e393f 2597 for_each_pipe(dev_priv, pipe) {
49a687c4
VS
2598 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2599 dirty |= WM_DIRTY_LINETIME(pipe);
2600 /* Must disable LP1+ watermarks too */
2601 dirty |= WM_DIRTY_LP_ALL;
2602 }
2603
2604 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2605 dirty |= WM_DIRTY_PIPE(pipe);
2606 /* Must disable LP1+ watermarks too */
2607 dirty |= WM_DIRTY_LP_ALL;
2608 }
2609 }
2610
2611 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2612 dirty |= WM_DIRTY_FBC;
2613 /* Must disable LP1+ watermarks too */
2614 dirty |= WM_DIRTY_LP_ALL;
2615 }
2616
2617 if (old->partitioning != new->partitioning) {
2618 dirty |= WM_DIRTY_DDB;
2619 /* Must disable LP1+ watermarks too */
2620 dirty |= WM_DIRTY_LP_ALL;
2621 }
2622
2623 /* LP1+ watermarks already deemed dirty, no need to continue */
2624 if (dirty & WM_DIRTY_LP_ALL)
2625 return dirty;
2626
2627 /* Find the lowest numbered LP1+ watermark in need of an update... */
2628 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2629 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2630 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2631 break;
2632 }
2633
2634 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2635 for (; wm_lp <= 3; wm_lp++)
2636 dirty |= WM_DIRTY_LP(wm_lp);
2637
2638 return dirty;
2639}
2640
8553c18e
VS
2641static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2642 unsigned int dirty)
801bcfff 2643{
820c1980 2644 struct ilk_wm_values *previous = &dev_priv->wm.hw;
8553c18e 2645 bool changed = false;
801bcfff 2646
facd619b
VS
2647 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2648 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2649 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
8553c18e 2650 changed = true;
facd619b
VS
2651 }
2652 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2653 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2654 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
8553c18e 2655 changed = true;
facd619b
VS
2656 }
2657 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2658 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2659 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
8553c18e 2660 changed = true;
facd619b 2661 }
801bcfff 2662
facd619b
VS
2663 /*
2664 * Don't touch WM1S_LP_EN here.
2665 * Doing so could cause underruns.
2666 */
6cef2b8a 2667
8553c18e
VS
2668 return changed;
2669}
2670
2671/*
2672 * The spec says we shouldn't write when we don't need, because every write
2673 * causes WMs to be re-evaluated, expending some power.
2674 */
820c1980
ID
2675static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2676 struct ilk_wm_values *results)
8553c18e
VS
2677{
2678 struct drm_device *dev = dev_priv->dev;
820c1980 2679 struct ilk_wm_values *previous = &dev_priv->wm.hw;
8553c18e
VS
2680 unsigned int dirty;
2681 uint32_t val;
2682
055e393f 2683 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
8553c18e
VS
2684 if (!dirty)
2685 return;
2686
2687 _ilk_disable_lp_wm(dev_priv, dirty);
2688
49a687c4 2689 if (dirty & WM_DIRTY_PIPE(PIPE_A))
801bcfff 2690 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
49a687c4 2691 if (dirty & WM_DIRTY_PIPE(PIPE_B))
801bcfff 2692 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
49a687c4 2693 if (dirty & WM_DIRTY_PIPE(PIPE_C))
801bcfff
PZ
2694 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2695
49a687c4 2696 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
801bcfff 2697 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
49a687c4 2698 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
801bcfff 2699 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
49a687c4 2700 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
801bcfff
PZ
2701 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2702
49a687c4 2703 if (dirty & WM_DIRTY_DDB) {
a42a5719 2704 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ac9545fd
VS
2705 val = I915_READ(WM_MISC);
2706 if (results->partitioning == INTEL_DDB_PART_1_2)
2707 val &= ~WM_MISC_DATA_PARTITION_5_6;
2708 else
2709 val |= WM_MISC_DATA_PARTITION_5_6;
2710 I915_WRITE(WM_MISC, val);
2711 } else {
2712 val = I915_READ(DISP_ARB_CTL2);
2713 if (results->partitioning == INTEL_DDB_PART_1_2)
2714 val &= ~DISP_DATA_PARTITION_5_6;
2715 else
2716 val |= DISP_DATA_PARTITION_5_6;
2717 I915_WRITE(DISP_ARB_CTL2, val);
2718 }
1011d8c4
PZ
2719 }
2720
49a687c4 2721 if (dirty & WM_DIRTY_FBC) {
cca32e9a
PZ
2722 val = I915_READ(DISP_ARB_CTL);
2723 if (results->enable_fbc_wm)
2724 val &= ~DISP_FBC_WM_DIS;
2725 else
2726 val |= DISP_FBC_WM_DIS;
2727 I915_WRITE(DISP_ARB_CTL, val);
2728 }
2729
954911eb
ID
2730 if (dirty & WM_DIRTY_LP(1) &&
2731 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2732 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2733
2734 if (INTEL_INFO(dev)->gen >= 7) {
6cef2b8a
VS
2735 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2736 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2737 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2738 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2739 }
801bcfff 2740
facd619b 2741 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
801bcfff 2742 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
facd619b 2743 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
801bcfff 2744 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
facd619b 2745 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
801bcfff 2746 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
609cedef
VS
2747
2748 dev_priv->wm.hw = *results;
801bcfff
PZ
2749}
2750
bf220452 2751static bool ilk_disable_lp_wm(struct drm_device *dev)
8553c18e
VS
2752{
2753 struct drm_i915_private *dev_priv = dev->dev_private;
2754
2755 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2756}
2757
b9cec075
DL
2758/*
2759 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2760 * different active planes.
2761 */
2762
2763#define SKL_DDB_SIZE 896 /* in blocks */
43d735a6 2764#define BXT_DDB_SIZE 512
b9cec075 2765
024c9045
MR
2766/*
2767 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2768 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2769 * other universal planes are in indices 1..n. Note that this may leave unused
2770 * indices between the top "sprite" plane and the cursor.
2771 */
2772static int
2773skl_wm_plane_id(const struct intel_plane *plane)
2774{
2775 switch (plane->base.type) {
2776 case DRM_PLANE_TYPE_PRIMARY:
2777 return 0;
2778 case DRM_PLANE_TYPE_CURSOR:
2779 return PLANE_CURSOR;
2780 case DRM_PLANE_TYPE_OVERLAY:
2781 return plane->plane + 1;
2782 default:
2783 MISSING_CASE(plane->base.type);
2784 return plane->plane;
2785 }
2786}
2787
b9cec075
DL
2788static void
2789skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
024c9045 2790 const struct intel_crtc_state *cstate,
b9cec075 2791 const struct intel_wm_config *config,
b9cec075
DL
2792 struct skl_ddb_entry *alloc /* out */)
2793{
024c9045 2794 struct drm_crtc *for_crtc = cstate->base.crtc;
b9cec075
DL
2795 struct drm_crtc *crtc;
2796 unsigned int pipe_size, ddb_size;
2797 int nth_active_pipe;
2798
024c9045 2799 if (!cstate->base.active) {
b9cec075
DL
2800 alloc->start = 0;
2801 alloc->end = 0;
2802 return;
2803 }
2804
43d735a6
DL
2805 if (IS_BROXTON(dev))
2806 ddb_size = BXT_DDB_SIZE;
2807 else
2808 ddb_size = SKL_DDB_SIZE;
b9cec075
DL
2809
2810 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2811
2812 nth_active_pipe = 0;
2813 for_each_crtc(dev, crtc) {
3ef00284 2814 if (!to_intel_crtc(crtc)->active)
b9cec075
DL
2815 continue;
2816
2817 if (crtc == for_crtc)
2818 break;
2819
2820 nth_active_pipe++;
2821 }
2822
2823 pipe_size = ddb_size / config->num_pipes_active;
2824 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
16160e3d 2825 alloc->end = alloc->start + pipe_size;
b9cec075
DL
2826}
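/*
 * Illustrative DDB split for the helper above (hypothetical two-pipe SKL
 * config): ddb_size = 896 - 4 = 892 blocks after the bypass allocation and
 * config->num_pipes_active = 2, so each pipe gets 892 / 2 = 446 blocks:
 *
 *   pipe A (nth_active_pipe = 0): [0, 446)
 *   pipe B (nth_active_pipe = 1): [446, 892)
 *
 * An inactive crtc simply gets the empty range [0, 0).
 */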
2827
2828static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
2829{
2830 if (config->num_pipes_active == 1)
2831 return 32;
2832
2833 return 8;
2834}
2835
a269c583
DL
2836static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2837{
2838 entry->start = reg & 0x3ff;
2839 entry->end = (reg >> 16) & 0x3ff;
16160e3d
DL
2840 if (entry->end)
2841 entry->end += 1;
a269c583
DL
2842}
2843
08db6652
DL
2844void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2845 struct skl_ddb_allocation *ddb /* out */)
a269c583 2846{
a269c583
DL
2847 enum pipe pipe;
2848 int plane;
2849 u32 val;
2850
b10f1b20
ML
2851 memset(ddb, 0, sizeof(*ddb));
2852
a269c583 2853 for_each_pipe(dev_priv, pipe) {
4d800030
ID
2854 enum intel_display_power_domain power_domain;
2855
2856 power_domain = POWER_DOMAIN_PIPE(pipe);
2857 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b10f1b20
ML
2858 continue;
2859
dd740780 2860 for_each_plane(dev_priv, pipe, plane) {
a269c583
DL
2861 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2862 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2863 val);
2864 }
2865
2866 val = I915_READ(CUR_BUF_CFG(pipe));
4969d33e
MR
2867 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2868 val);
4d800030
ID
2869
2870 intel_display_power_put(dev_priv, power_domain);
a269c583
DL
2871 }
2872}
2873
b9cec075 2874static unsigned int
024c9045
MR
2875skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2876 const struct drm_plane_state *pstate,
2877 int y)
b9cec075 2878{
024c9045
MR
2879 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2880 struct drm_framebuffer *fb = pstate->fb;
2cd601c6
CK
2881
2882 /* for planar format */
024c9045 2883 if (fb->pixel_format == DRM_FORMAT_NV12) {
2cd601c6 2884 if (y) /* y-plane data rate */
024c9045
MR
2885 return intel_crtc->config->pipe_src_w *
2886 intel_crtc->config->pipe_src_h *
2887 drm_format_plane_cpp(fb->pixel_format, 0);
2cd601c6 2888 else /* uv-plane data rate */
024c9045
MR
2889 return (intel_crtc->config->pipe_src_w/2) *
2890 (intel_crtc->config->pipe_src_h/2) *
2891 drm_format_plane_cpp(fb->pixel_format, 1);
2cd601c6
CK
2892 }
2893
2894 /* for packed formats */
024c9045
MR
2895 return intel_crtc->config->pipe_src_w *
2896 intel_crtc->config->pipe_src_h *
2897 drm_format_plane_cpp(fb->pixel_format, 0);
b9cec075
DL
2898}
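/*
 * Worked data-rate example (hypothetical 1920x1080 pipe source): an XRGB8888
 * plane contributes 1920 * 1080 * 4 = 8294400, while an NV12 plane is split
 * into a Y part of 1920 * 1080 * 1 = 2073600 and a UV part of
 * (1920/2) * (1080/2) * 2 = 1036800. Only the ratios between planes matter
 * when the DDB is divided up proportionally below.
 */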
2899
2900/*
2901 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
2902 * an 8192x4096@32bpp framebuffer:
2903 * 3 * 4096 * 8192 * 4 < 2^32
2904 */
2905static unsigned int
024c9045 2906skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
b9cec075 2907{
024c9045
MR
2908 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2909 struct drm_device *dev = intel_crtc->base.dev;
2910 const struct intel_plane *intel_plane;
b9cec075 2911 unsigned int total_data_rate = 0;
b9cec075 2912
024c9045
MR
2913 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2914 const struct drm_plane_state *pstate = intel_plane->base.state;
b9cec075 2915
024c9045 2916 if (pstate->fb == NULL)
b9cec075
DL
2917 continue;
2918
024c9045
MR
2919 if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2920 continue;
2921
2922 /* packed/uv */
2923 total_data_rate += skl_plane_relative_data_rate(cstate,
2924 pstate,
2925 0);
2926
2927 if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
2928 /* y-plane */
2929 total_data_rate += skl_plane_relative_data_rate(cstate,
2930 pstate,
2931 1);
b9cec075
DL
2932 }
2933
2934 return total_data_rate;
2935}
2936
2937static void
024c9045 2938skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
b9cec075
DL
2939 struct skl_ddb_allocation *ddb /* out */)
2940{
024c9045 2941 struct drm_crtc *crtc = cstate->base.crtc;
b9cec075 2942 struct drm_device *dev = crtc->dev;
aa363136
MR
2943 struct drm_i915_private *dev_priv = to_i915(dev);
2944 struct intel_wm_config *config = &dev_priv->wm.config;
b9cec075 2945 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
024c9045 2946 struct intel_plane *intel_plane;
b9cec075 2947 enum pipe pipe = intel_crtc->pipe;
34bb56af 2948 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
b9cec075 2949 uint16_t alloc_size, start, cursor_blocks;
80958155 2950 uint16_t minimum[I915_MAX_PLANES];
2cd601c6 2951 uint16_t y_minimum[I915_MAX_PLANES];
b9cec075 2952 unsigned int total_data_rate;
b9cec075 2953
024c9045 2954 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
34bb56af 2955 alloc_size = skl_ddb_entry_size(alloc);
b9cec075
DL
2956 if (alloc_size == 0) {
2957 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
4969d33e
MR
2958 memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
2959 sizeof(ddb->plane[pipe][PLANE_CURSOR]));
b9cec075
DL
2960 return;
2961 }
2962
2963 cursor_blocks = skl_cursor_allocation(config);
4969d33e
MR
2964 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
2965 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
b9cec075
DL
2966
2967 alloc_size -= cursor_blocks;
34bb56af 2968 alloc->end -= cursor_blocks;
b9cec075 2969
80958155 2970	/* 1. Allocate the minimum required blocks for each active plane */
024c9045
MR
2971 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2972 struct drm_plane *plane = &intel_plane->base;
2973 struct drm_framebuffer *fb = plane->state->fb;
2974 int id = skl_wm_plane_id(intel_plane);
80958155 2975
024c9045
MR
2976 if (fb == NULL)
2977 continue;
2978 if (plane->type == DRM_PLANE_TYPE_CURSOR)
80958155
DL
2979 continue;
2980
024c9045
MR
2981 minimum[id] = 8;
2982 alloc_size -= minimum[id];
2983 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
2984 alloc_size -= y_minimum[id];
80958155
DL
2985 }
2986
b9cec075 2987 /*
80958155
DL
2988 * 2. Distribute the remaining space in proportion to the amount of
2989 * data each plane needs to fetch from memory.
b9cec075
DL
2990 *
2991 * FIXME: we may not allocate every single block here.
2992 */
024c9045 2993 total_data_rate = skl_get_total_relative_data_rate(cstate);
b9cec075 2994
34bb56af 2995 start = alloc->start;
024c9045
MR
2996 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2997 struct drm_plane *plane = &intel_plane->base;
2998 struct drm_plane_state *pstate = intel_plane->base.state;
2cd601c6
CK
2999 unsigned int data_rate, y_data_rate;
3000 uint16_t plane_blocks, y_plane_blocks = 0;
024c9045 3001 int id = skl_wm_plane_id(intel_plane);
b9cec075 3002
024c9045
MR
3003 if (pstate->fb == NULL)
3004 continue;
3005 if (plane->type == DRM_PLANE_TYPE_CURSOR)
b9cec075
DL
3006 continue;
3007
024c9045 3008 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
b9cec075
DL
3009
3010 /*
2cd601c6 3011 * allocation for (packed formats) or (uv-plane part of planar format):
b9cec075
DL
3012	 * promote the expression to 64 bits to avoid overflowing; the
3013	 * result stays below the available space since data_rate / total_data_rate < 1
3014 */
024c9045 3015 plane_blocks = minimum[id];
80958155
DL
3016 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3017 total_data_rate);
b9cec075 3018
024c9045
MR
3019 ddb->plane[pipe][id].start = start;
3020 ddb->plane[pipe][id].end = start + plane_blocks;
b9cec075
DL
3021
3022 start += plane_blocks;
2cd601c6
CK
3023
3024 /*
3025 * allocation for y_plane part of planar format:
3026 */
024c9045
MR
3027 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
3028 y_data_rate = skl_plane_relative_data_rate(cstate,
3029 pstate,
3030 1);
3031 y_plane_blocks = y_minimum[id];
2cd601c6
CK
3032 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3033 total_data_rate);
3034
024c9045
MR
3035 ddb->y_plane[pipe][id].start = start;
3036 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
2cd601c6
CK
3037
3038 start += y_plane_blocks;
3039 }
3040
b9cec075
DL
3041 }
3042
3043}
3044
5cec258b 3045static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
2d41c0b5
PB
3046{
3047 /* TODO: Take into account the scalers once we support them */
2d112de7 3048 return config->base.adjusted_mode.crtc_clock;
2d41c0b5
PB
3049}
3050
3051/*
3052 * The max latency should be 257 (max the punit can code is 255 and we add 2us
ac484963 3053 * for the read latency) and cpp should always be <= 8, so that
2d41c0b5
PB
3054 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3055 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3056*/
ac484963 3057static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
2d41c0b5
PB
3058{
3059 uint32_t wm_intermediate_val, ret;
3060
3061 if (latency == 0)
3062 return UINT_MAX;
3063
ac484963 3064 wm_intermediate_val = latency * pixel_rate * cpp / 512;
2d41c0b5
PB
3065 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3066
3067 return ret;
3068}
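/*
 * Worked gen9 method 1 example (hypothetical values): pixel_rate = 148500 kHz,
 * cpp = 4, latency = 15 us.
 *
 *   15 * 148500 * 4 / 512     = 17402
 *   DIV_ROUND_UP(17402, 1000) = 18
 *
 * i.e. roughly 8900 bytes of pixel data drain during the latency window,
 * which rounds up to 18 512-byte DDB blocks.
 */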
3069
3070static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
ac484963 3071 uint32_t horiz_pixels, uint8_t cpp,
0fda6568 3072 uint64_t tiling, uint32_t latency)
2d41c0b5 3073{
d4c2aa60
TU
3074 uint32_t ret;
3075 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3076 uint32_t wm_intermediate_val;
2d41c0b5
PB
3077
3078 if (latency == 0)
3079 return UINT_MAX;
3080
ac484963 3081 plane_bytes_per_line = horiz_pixels * cpp;
0fda6568
TU
3082
3083 if (tiling == I915_FORMAT_MOD_Y_TILED ||
3084 tiling == I915_FORMAT_MOD_Yf_TILED) {
3085 plane_bytes_per_line *= 4;
3086 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3087 plane_blocks_per_line /= 4;
3088 } else {
3089 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3090 }
3091
2d41c0b5
PB
3092 wm_intermediate_val = latency * pixel_rate;
3093 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
d4c2aa60 3094 plane_blocks_per_line;
2d41c0b5
PB
3095
3096 return ret;
3097}
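/*
 * Worked gen9 method 2 example (hypothetical values, linear tiling):
 * pixel_rate = 148500 kHz, pipe_htotal = 2200, horiz_pixels = 1920, cpp = 4,
 * latency = 15 us.
 *
 *   plane_bytes_per_line  = 1920 * 4                               = 7680
 *   plane_blocks_per_line = DIV_ROUND_UP(7680, 512)                = 15
 *   result                = DIV_ROUND_UP(15 * 148500, 2200 * 1000) * 15
 *                         = 2 * 15                                 = 30 blocks
 *
 * The Y/Yf-tiled branch instead rounds the block count over a group of four
 * lines rather than per line.
 */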
3098
2d41c0b5
PB
3099static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3100 const struct intel_crtc *intel_crtc)
3101{
3102 struct drm_device *dev = intel_crtc->base.dev;
3103 struct drm_i915_private *dev_priv = dev->dev_private;
3104 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
2d41c0b5 3105
e6d90023
KM
3106 /*
3107 * If ddb allocation of pipes changed, it may require recalculation of
3108 * watermarks
3109 */
3110 if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
2d41c0b5
PB
3111 return true;
3112
3113 return false;
3114}
3115
d4c2aa60 3116static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
024c9045
MR
3117 struct intel_crtc_state *cstate,
3118 struct intel_plane *intel_plane,
afb024aa 3119 uint16_t ddb_allocation,
d4c2aa60 3120 int level,
afb024aa
DL
3121 uint16_t *out_blocks, /* out */
3122 uint8_t *out_lines /* out */)
2d41c0b5 3123{
024c9045
MR
3124 struct drm_plane *plane = &intel_plane->base;
3125 struct drm_framebuffer *fb = plane->state->fb;
d4c2aa60
TU
3126 uint32_t latency = dev_priv->wm.skl_latency[level];
3127 uint32_t method1, method2;
3128 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3129 uint32_t res_blocks, res_lines;
3130 uint32_t selected_result;
ac484963 3131 uint8_t cpp;
2d41c0b5 3132
024c9045 3133 if (latency == 0 || !cstate->base.active || !fb)
2d41c0b5
PB
3134 return false;
3135
ac484963 3136 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
024c9045 3137 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
ac484963 3138 cpp, latency);
024c9045
MR
3139 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
3140 cstate->base.adjusted_mode.crtc_htotal,
3141 cstate->pipe_src_w,
ac484963 3142 cpp, fb->modifier[0],
d4c2aa60 3143 latency);
2d41c0b5 3144
ac484963 3145 plane_bytes_per_line = cstate->pipe_src_w * cpp;
d4c2aa60 3146 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2d41c0b5 3147
024c9045
MR
3148 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3149 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
1fc0a8f7
TU
3150 uint32_t min_scanlines = 4;
3151 uint32_t y_tile_minimum;
024c9045 3152 if (intel_rotation_90_or_270(plane->state->rotation)) {
ac484963 3153 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
024c9045
MR
3154 drm_format_plane_cpp(fb->pixel_format, 1) :
3155 drm_format_plane_cpp(fb->pixel_format, 0);
3156
ac484963 3157 switch (cpp) {
1fc0a8f7
TU
3158 case 1:
3159 min_scanlines = 16;
3160 break;
3161 case 2:
3162 min_scanlines = 8;
3163 break;
3164 case 8:
3165 WARN(1, "Unsupported pixel depth for rotation");
2f0b5790 3166 }
1fc0a8f7
TU
3167 }
3168 y_tile_minimum = plane_blocks_per_line * min_scanlines;
0fda6568
TU
3169 selected_result = max(method2, y_tile_minimum);
3170 } else {
3171 if ((ddb_allocation / plane_blocks_per_line) >= 1)
3172 selected_result = min(method1, method2);
3173 else
3174 selected_result = method1;
3175 }
2d41c0b5 3176
d4c2aa60
TU
3177 res_blocks = selected_result + 1;
3178 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
e6d66171 3179
0fda6568 3180 if (level >= 1 && level <= 7) {
024c9045
MR
3181 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3182 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
0fda6568
TU
3183 res_lines += 4;
3184 else
3185 res_blocks++;
3186 }
e6d66171 3187
d4c2aa60 3188 if (res_blocks >= ddb_allocation || res_lines > 31)
e6d66171
DL
3189 return false;
3190
3191 *out_blocks = res_blocks;
3192 *out_lines = res_lines;
2d41c0b5
PB
3193
3194 return true;
3195}
3196
3197static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3198 struct skl_ddb_allocation *ddb,
024c9045 3199 struct intel_crtc_state *cstate,
2d41c0b5 3200 int level,
2d41c0b5
PB
3201 struct skl_wm_level *result)
3202{
024c9045
MR
3203 struct drm_device *dev = dev_priv->dev;
3204 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3205 struct intel_plane *intel_plane;
2d41c0b5 3206 uint16_t ddb_blocks;
024c9045
MR
3207 enum pipe pipe = intel_crtc->pipe;
3208
3209 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3210 int i = skl_wm_plane_id(intel_plane);
2d41c0b5 3211
2d41c0b5
PB
3212 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3213
d4c2aa60 3214 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
024c9045
MR
3215 cstate,
3216 intel_plane,
2d41c0b5 3217 ddb_blocks,
d4c2aa60 3218 level,
2d41c0b5
PB
3219 &result->plane_res_b[i],
3220 &result->plane_res_l[i]);
3221 }
2d41c0b5
PB
3222}
3223
407b50f3 3224static uint32_t
024c9045 3225skl_compute_linetime_wm(struct intel_crtc_state *cstate)
407b50f3 3226{
024c9045 3227 if (!cstate->base.active)
407b50f3
DL
3228 return 0;
3229
024c9045 3230 if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
661abfc0 3231 return 0;
407b50f3 3232
024c9045
MR
3233 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3234 skl_pipe_pixel_rate(cstate));
407b50f3
DL
3235}
3236
024c9045 3237static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
9414f563 3238 struct skl_wm_level *trans_wm /* out */)
407b50f3 3239{
024c9045 3240 struct drm_crtc *crtc = cstate->base.crtc;
9414f563 3241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
024c9045 3242 struct intel_plane *intel_plane;
9414f563 3243
024c9045 3244 if (!cstate->base.active)
407b50f3 3245 return;
9414f563
DL
3246
3247 /* Until we know more, just disable transition WMs */
024c9045
MR
3248 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3249 int i = skl_wm_plane_id(intel_plane);
3250
9414f563 3251 trans_wm->plane_en[i] = false;
024c9045 3252 }
407b50f3
DL
3253}
3254
024c9045 3255static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
2d41c0b5 3256 struct skl_ddb_allocation *ddb,
2d41c0b5
PB
3257 struct skl_pipe_wm *pipe_wm)
3258{
024c9045 3259 struct drm_device *dev = cstate->base.crtc->dev;
2d41c0b5 3260 const struct drm_i915_private *dev_priv = dev->dev_private;
2d41c0b5
PB
3261 int level, max_level = ilk_wm_max_level(dev);
3262
3263 for (level = 0; level <= max_level; level++) {
024c9045
MR
3264 skl_compute_wm_level(dev_priv, ddb, cstate,
3265 level, &pipe_wm->wm[level]);
2d41c0b5 3266 }
024c9045 3267 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
2d41c0b5 3268
024c9045 3269 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
2d41c0b5
PB
3270}
3271
3272static void skl_compute_wm_results(struct drm_device *dev,
2d41c0b5
PB
3273 struct skl_pipe_wm *p_wm,
3274 struct skl_wm_values *r,
3275 struct intel_crtc *intel_crtc)
3276{
3277 int level, max_level = ilk_wm_max_level(dev);
3278 enum pipe pipe = intel_crtc->pipe;
9414f563
DL
3279 uint32_t temp;
3280 int i;
2d41c0b5
PB
3281
3282 for (level = 0; level <= max_level; level++) {
2d41c0b5
PB
3283 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3284 temp = 0;
2d41c0b5
PB
3285
3286 temp |= p_wm->wm[level].plane_res_l[i] <<
3287 PLANE_WM_LINES_SHIFT;
3288 temp |= p_wm->wm[level].plane_res_b[i];
3289 if (p_wm->wm[level].plane_en[i])
3290 temp |= PLANE_WM_EN;
3291
3292 r->plane[pipe][i][level] = temp;
2d41c0b5
PB
3293 }
3294
3295 temp = 0;
2d41c0b5 3296
4969d33e
MR
3297 temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3298 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
2d41c0b5 3299
4969d33e 3300 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
2d41c0b5
PB
3301 temp |= PLANE_WM_EN;
3302
4969d33e 3303 r->plane[pipe][PLANE_CURSOR][level] = temp;
2d41c0b5
PB
3304
3305 }
3306
9414f563
DL
3307 /* transition WMs */
3308 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3309 temp = 0;
3310 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3311 temp |= p_wm->trans_wm.plane_res_b[i];
3312 if (p_wm->trans_wm.plane_en[i])
3313 temp |= PLANE_WM_EN;
3314
3315 r->plane_trans[pipe][i] = temp;
3316 }
3317
3318 temp = 0;
4969d33e
MR
3319 temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3320 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3321 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
9414f563
DL
3322 temp |= PLANE_WM_EN;
3323
4969d33e 3324 r->plane_trans[pipe][PLANE_CURSOR] = temp;
9414f563 3325
2d41c0b5
PB
3326 r->wm_linetime[pipe] = p_wm->linetime;
3327}
3328
f0f59a00
VS
3329static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3330 i915_reg_t reg,
16160e3d
DL
3331 const struct skl_ddb_entry *entry)
3332{
3333 if (entry->end)
3334 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3335 else
3336 I915_WRITE(reg, 0);
3337}
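/*
 * Hypothetical usage sketch of the helper above (values are illustrative,
 * not from a real allocation): an entry spanning blocks 0..159 is encoded
 * as (159 << 16) | 0, while an entry with end == 0 simply clears the
 * register.
 *
 *	struct skl_ddb_entry example = { .start = 0, .end = 160 };
 *	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(PIPE_A, 0), &example);
 */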
3338
2d41c0b5
PB
3339static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3340 const struct skl_wm_values *new)
3341{
3342 struct drm_device *dev = dev_priv->dev;
3343 struct intel_crtc *crtc;
3344
19c8054c 3345 for_each_intel_crtc(dev, crtc) {
2d41c0b5
PB
3346 int i, level, max_level = ilk_wm_max_level(dev);
3347 enum pipe pipe = crtc->pipe;
3348
5d374d96
DL
3349 if (!new->dirty[pipe])
3350 continue;
8211bd5b 3351
5d374d96 3352 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
8211bd5b 3353
5d374d96
DL
3354 for (level = 0; level <= max_level; level++) {
3355 for (i = 0; i < intel_num_planes(crtc); i++)
3356 I915_WRITE(PLANE_WM(pipe, i, level),
3357 new->plane[pipe][i][level]);
3358 I915_WRITE(CUR_WM(pipe, level),
4969d33e 3359 new->plane[pipe][PLANE_CURSOR][level]);
2d41c0b5 3360 }
5d374d96
DL
3361 for (i = 0; i < intel_num_planes(crtc); i++)
3362 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3363 new->plane_trans[pipe][i]);
4969d33e
MR
3364 I915_WRITE(CUR_WM_TRANS(pipe),
3365 new->plane_trans[pipe][PLANE_CURSOR]);
5d374d96 3366
2cd601c6 3367 for (i = 0; i < intel_num_planes(crtc); i++) {
5d374d96
DL
3368 skl_ddb_entry_write(dev_priv,
3369 PLANE_BUF_CFG(pipe, i),
3370 &new->ddb.plane[pipe][i]);
2cd601c6
CK
3371 skl_ddb_entry_write(dev_priv,
3372 PLANE_NV12_BUF_CFG(pipe, i),
3373 &new->ddb.y_plane[pipe][i]);
3374 }
5d374d96
DL
3375
3376 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
4969d33e 3377 &new->ddb.plane[pipe][PLANE_CURSOR]);
2d41c0b5 3378 }
2d41c0b5
PB
3379}
3380
0e8fb7ba
DL
3381/*
3382 * When setting up a new DDB allocation arrangement, we need to correctly
3383 * sequence the times at which the new allocations for the pipes are taken into
3384 * account or we'll have pipes fetching from space previously allocated to
3385 * another pipe.
3386 *
3387 * Roughly the sequence looks like:
3388 * 1. re-allocate the pipe(s) with the allocation being reduced and not
3389 * overlapping with a previous light-up pipe (another way to put it is:
3390 * pipes with their new allocation strictly included into their old ones).
3391 * 2. re-allocate the other pipes that get their allocation reduced
3392 * 3. allocate the pipes having their allocation increased
3393 *
3394 * Steps 1. and 2. are here to take care of the following case:
3395 * - Initially DDB looks like this:
3396 * | B | C |
3397 * - enable pipe A.
3398 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3399 * allocation
3400 * | A | B | C |
3401 *
3402 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
3403 */
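/*
 * Mapping the example above onto the three flush passes implemented in
 * skl_flush_wm_values() below: pass 1 handles C, whose new allocation is
 * strictly contained in its old one; pass 2 handles B, which shrinks but
 * overlaps space previously owned by C; pass 3 handles A, which only
 * gains space and therefore does not need a vblank wait.
 */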
3404
d21b795c
DL
3405static void
3406skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
0e8fb7ba 3407{
0e8fb7ba
DL
3408 int plane;
3409
d21b795c
DL
3410 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3411
dd740780 3412 for_each_plane(dev_priv, pipe, plane) {
0e8fb7ba
DL
3413 I915_WRITE(PLANE_SURF(pipe, plane),
3414 I915_READ(PLANE_SURF(pipe, plane)));
3415 }
3416 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3417}
3418
3419static bool
3420skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3421 const struct skl_ddb_allocation *new,
3422 enum pipe pipe)
3423{
3424 uint16_t old_size, new_size;
3425
3426 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3427 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3428
3429 return old_size != new_size &&
3430 new->pipe[pipe].start >= old->pipe[pipe].start &&
3431 new->pipe[pipe].end <= old->pipe[pipe].end;
3432}
3433
3434static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3435 struct skl_wm_values *new_values)
3436{
3437 struct drm_device *dev = dev_priv->dev;
3438 struct skl_ddb_allocation *cur_ddb, *new_ddb;
c929cb45 3439 bool reallocated[I915_MAX_PIPES] = {};
0e8fb7ba
DL
3440 struct intel_crtc *crtc;
3441 enum pipe pipe;
3442
3443 new_ddb = &new_values->ddb;
3444 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3445
3446 /*
3447 * First pass: flush the pipes with the new allocation contained into
3448 * the old space.
3449 *
3450 * We'll wait for the vblank on those pipes to ensure we can safely
3451 * re-allocate the freed space without this pipe fetching from it.
3452 */
3453 for_each_intel_crtc(dev, crtc) {
3454 if (!crtc->active)
3455 continue;
3456
3457 pipe = crtc->pipe;
3458
3459 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
3460 continue;
3461
d21b795c 3462 skl_wm_flush_pipe(dev_priv, pipe, 1);
0e8fb7ba
DL
3463 intel_wait_for_vblank(dev, pipe);
3464
3465 reallocated[pipe] = true;
3466 }
3467
3468
3469 /*
3470 * Second pass: flush the pipes that are having their allocation
3471 * reduced, but overlapping with a previous allocation.
3472 *
3473 * Here as well we need to wait for the vblank to make sure the freed
3474 * space is not used anymore.
3475 */
3476 for_each_intel_crtc(dev, crtc) {
3477 if (!crtc->active)
3478 continue;
3479
3480 pipe = crtc->pipe;
3481
3482 if (reallocated[pipe])
3483 continue;
3484
3485 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3486 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
d21b795c 3487 skl_wm_flush_pipe(dev_priv, pipe, 2);
0e8fb7ba 3488 intel_wait_for_vblank(dev, pipe);
d9d8e6b3 3489 reallocated[pipe] = true;
0e8fb7ba 3490 }
0e8fb7ba
DL
3491 }
3492
3493 /*
3494 * Third pass: flush the pipes that got more space allocated.
3495 *
3496 * We don't need to actively wait for the update here, next vblank
3497 * will just get more DDB space with the correct WM values.
3498 */
3499 for_each_intel_crtc(dev, crtc) {
3500 if (!crtc->active)
3501 continue;
3502
3503 pipe = crtc->pipe;
3504
3505 /*
3506 * At this point, only the pipes more space than before are
3507 * left to re-allocate.
3508 */
3509 if (reallocated[pipe])
3510 continue;
3511
d21b795c 3512 skl_wm_flush_pipe(dev_priv, pipe, 3);
0e8fb7ba
DL
3513 }
3514}
3515
2d41c0b5 3516static bool skl_update_pipe_wm(struct drm_crtc *crtc,
2d41c0b5
PB
3517 struct skl_ddb_allocation *ddb, /* out */
3518 struct skl_pipe_wm *pipe_wm /* out */)
3519{
3520 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
024c9045 3521 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
2d41c0b5 3522
aa363136 3523 skl_allocate_pipe_ddb(cstate, ddb);
024c9045 3524 skl_compute_pipe_wm(cstate, ddb, pipe_wm);
2d41c0b5 3525
4e0963c7 3526 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
2d41c0b5
PB
3527 return false;
3528
4e0963c7 3529 intel_crtc->wm.active.skl = *pipe_wm;
2cd601c6 3530
2d41c0b5
PB
3531 return true;
3532}
3533
3534static void skl_update_other_pipe_wm(struct drm_device *dev,
3535 struct drm_crtc *crtc,
2d41c0b5
PB
3536 struct skl_wm_values *r)
3537{
3538 struct intel_crtc *intel_crtc;
3539 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3540
3541 /*
3542 * If the WM update hasn't changed the allocation for this_crtc (the
3543 * crtc we are currently computing the new WM values for), other
3544 * enabled crtcs will keep the same allocation and we don't need to
3545 * recompute anything for them.
3546 */
3547 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
3548 return;
3549
3550 /*
3551 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3552 * other active pipes need new DDB allocation and WM values.
3553 */
19c8054c 3554 for_each_intel_crtc(dev, intel_crtc) {
2d41c0b5
PB
3555 struct skl_pipe_wm pipe_wm = {};
3556 bool wm_changed;
3557
3558 if (this_crtc->pipe == intel_crtc->pipe)
3559 continue;
3560
3561 if (!intel_crtc->active)
3562 continue;
3563
aa363136 3564 wm_changed = skl_update_pipe_wm(&intel_crtc->base,
2d41c0b5
PB
3565 &r->ddb, &pipe_wm);
3566
3567 /*
3568 * If we end up re-computing the other pipe WM values, it's
3569 * because it was really needed, so we expect the WM values to
3570 * be different.
3571 */
3572 WARN_ON(!wm_changed);
3573
024c9045 3574 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
2d41c0b5
PB
3575 r->dirty[intel_crtc->pipe] = true;
3576 }
3577}
3578
adda50b8
BP
3579static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
3580{
3581 watermarks->wm_linetime[pipe] = 0;
3582 memset(watermarks->plane[pipe], 0,
3583 sizeof(uint32_t) * 8 * I915_MAX_PLANES);
adda50b8
BP
3584 memset(watermarks->plane_trans[pipe],
3585 0, sizeof(uint32_t) * I915_MAX_PLANES);
4969d33e 3586 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
adda50b8
BP
3587
3588 /* Clear ddb entries for pipe */
3589 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
3590 memset(&watermarks->ddb.plane[pipe], 0,
3591 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3592 memset(&watermarks->ddb.y_plane[pipe], 0,
3593 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
4969d33e
MR
3594 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
3595 sizeof(struct skl_ddb_entry));
adda50b8
BP
3596
3597}
3598
2d41c0b5
PB
3599static void skl_update_wm(struct drm_crtc *crtc)
3600{
3601 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3602 struct drm_device *dev = crtc->dev;
3603 struct drm_i915_private *dev_priv = dev->dev_private;
2d41c0b5 3604 struct skl_wm_values *results = &dev_priv->wm.skl_results;
4e0963c7
MR
3605 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3606 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
2d41c0b5 3607
adda50b8
BP
3608
3609 /* Clear all dirty flags */
3610 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3611
3612 skl_clear_wm(results, intel_crtc->pipe);
2d41c0b5 3613
aa363136 3614 if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
2d41c0b5
PB
3615 return;
3616
4e0963c7 3617 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
2d41c0b5
PB
3618 results->dirty[intel_crtc->pipe] = true;
3619
aa363136 3620 skl_update_other_pipe_wm(dev, crtc, results);
2d41c0b5 3621 skl_write_wm_values(dev_priv, results);
0e8fb7ba 3622 skl_flush_wm_values(dev_priv, results);
53b0deb4
DL
3623
3624 /* store the new configuration */
3625 dev_priv->wm.skl_hw = *results;
2d41c0b5
PB
3626}
3627
d890565c
VS
3628static void ilk_compute_wm_config(struct drm_device *dev,
3629 struct intel_wm_config *config)
3630{
3631 struct intel_crtc *crtc;
3632
3633 /* Compute the currently _active_ config */
3634 for_each_intel_crtc(dev, crtc) {
3635 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
3636
3637 if (!wm->pipe_enabled)
3638 continue;
3639
3640 config->sprites_enabled |= wm->sprites_enabled;
3641 config->sprites_scaled |= wm->sprites_scaled;
3642 config->num_pipes_active++;
3643 }
3644}
3645
bf220452 3646static void ilk_program_watermarks(struct intel_crtc_state *cstate)
801bcfff 3647{
bf220452
MR
3648 struct drm_crtc *crtc = cstate->base.crtc;
3649 struct drm_device *dev = crtc->dev;
3650 struct drm_i915_private *dev_priv = to_i915(dev);
b9d5c839 3651 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
820c1980 3652 struct ilk_wm_maximums max;
d890565c 3653 struct intel_wm_config config = {};
820c1980 3654 struct ilk_wm_values results = {};
77c122bc 3655 enum intel_ddb_partitioning partitioning;
261a27d1 3656
d890565c
VS
3657 ilk_compute_wm_config(dev, &config);
3658
3659 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3660 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
a485bfb8
VS
3661
3662 /* 5/6 split only in single pipe config on IVB+ */
ec98c8d1 3663 if (INTEL_INFO(dev)->gen >= 7 &&
d890565c
VS
3664 config.num_pipes_active == 1 && config.sprites_enabled) {
3665 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3666 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
0362c781 3667
820c1980 3668 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
861f3389 3669 } else {
198a1e9b 3670 best_lp_wm = &lp_wm_1_2;
861f3389
PZ
3671 }
3672
198a1e9b 3673 partitioning = (best_lp_wm == &lp_wm_1_2) ?
77c122bc 3674 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
801bcfff 3675
820c1980 3676 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
609cedef 3677
820c1980 3678 ilk_write_wm_values(dev_priv, &results);
1011d8c4
PZ
3679}
3680
bf220452 3681static void ilk_update_wm(struct drm_crtc *crtc)
b9d5c839 3682{
bf220452
MR
3683 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3684 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
b9d5c839 3685
bf220452 3686 WARN_ON(cstate->base.active != intel_crtc->active);
b9d5c839 3687
bf220452
MR
3688 /*
3689 * IVB workaround: must disable low power watermarks for at least
3690 * one frame before enabling scaling. LP watermarks can be re-enabled
3691 * when scaling is disabled.
3692 *
3693 * WaCxSRDisabledForSpriteScaling:ivb
3694 */
3695 if (cstate->disable_lp_wm) {
3696 ilk_disable_lp_wm(crtc->dev);
3697 intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
396e33ae 3698 }
bf220452
MR
3699
3700 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
3701
3702 ilk_program_watermarks(cstate);
b9d5c839
VS
3703}
3704
3078999f
PB
3705static void skl_pipe_wm_active_state(uint32_t val,
3706 struct skl_pipe_wm *active,
3707 bool is_transwm,
3708 bool is_cursor,
3709 int i,
3710 int level)
3711{
3712 bool is_enabled = (val & PLANE_WM_EN) != 0;
3713
3714 if (!is_transwm) {
3715 if (!is_cursor) {
3716 active->wm[level].plane_en[i] = is_enabled;
3717 active->wm[level].plane_res_b[i] =
3718 val & PLANE_WM_BLOCKS_MASK;
3719 active->wm[level].plane_res_l[i] =
3720 (val >> PLANE_WM_LINES_SHIFT) &
3721 PLANE_WM_LINES_MASK;
3722 } else {
4969d33e
MR
3723 active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
3724 active->wm[level].plane_res_b[PLANE_CURSOR] =
3078999f 3725 val & PLANE_WM_BLOCKS_MASK;
4969d33e 3726 active->wm[level].plane_res_l[PLANE_CURSOR] =
3078999f
PB
3727 (val >> PLANE_WM_LINES_SHIFT) &
3728 PLANE_WM_LINES_MASK;
3729 }
3730 } else {
3731 if (!is_cursor) {
3732 active->trans_wm.plane_en[i] = is_enabled;
3733 active->trans_wm.plane_res_b[i] =
3734 val & PLANE_WM_BLOCKS_MASK;
3735 active->trans_wm.plane_res_l[i] =
3736 (val >> PLANE_WM_LINES_SHIFT) &
3737 PLANE_WM_LINES_MASK;
3738 } else {
4969d33e
MR
3739 active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
3740 active->trans_wm.plane_res_b[PLANE_CURSOR] =
3078999f 3741 val & PLANE_WM_BLOCKS_MASK;
4969d33e 3742 active->trans_wm.plane_res_l[PLANE_CURSOR] =
3078999f
PB
3743 (val >> PLANE_WM_LINES_SHIFT) &
3744 PLANE_WM_LINES_MASK;
3745 }
3746 }
3747}
3748
3749static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3750{
3751 struct drm_device *dev = crtc->dev;
3752 struct drm_i915_private *dev_priv = dev->dev_private;
3753 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3754 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4e0963c7
MR
3755 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3756 struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
3078999f
PB
3757 enum pipe pipe = intel_crtc->pipe;
3758 int level, i, max_level;
3759 uint32_t temp;
3760
3761 max_level = ilk_wm_max_level(dev);
3762
3763 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3764
3765 for (level = 0; level <= max_level; level++) {
3766 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3767 hw->plane[pipe][i][level] =
3768 I915_READ(PLANE_WM(pipe, i, level));
4969d33e 3769 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
3078999f
PB
3770 }
3771
3772 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3773 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
4969d33e 3774 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
3078999f 3775
3ef00284 3776 if (!intel_crtc->active)
3078999f
PB
3777 return;
3778
3779 hw->dirty[pipe] = true;
3780
3781 active->linetime = hw->wm_linetime[pipe];
3782
3783 for (level = 0; level <= max_level; level++) {
3784 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3785 temp = hw->plane[pipe][i][level];
3786 skl_pipe_wm_active_state(temp, active, false,
3787 false, i, level);
3788 }
4969d33e 3789 temp = hw->plane[pipe][PLANE_CURSOR][level];
3078999f
PB
3790 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3791 }
3792
3793 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3794 temp = hw->plane_trans[pipe][i];
3795 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3796 }
3797
4969d33e 3798 temp = hw->plane_trans[pipe][PLANE_CURSOR];
3078999f 3799 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
4e0963c7
MR
3800
3801 intel_crtc->wm.active.skl = *active;
3078999f
PB
3802}
3803
3804void skl_wm_get_hw_state(struct drm_device *dev)
3805{
a269c583
DL
3806 struct drm_i915_private *dev_priv = dev->dev_private;
3807 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3078999f
PB
3808 struct drm_crtc *crtc;
3809
a269c583 3810 skl_ddb_get_hw_state(dev_priv, ddb);
3078999f
PB
3811 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3812 skl_pipe_wm_get_hw_state(crtc);
3813}
3814
243e6a44
VS
3815static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3816{
3817 struct drm_device *dev = crtc->dev;
3818 struct drm_i915_private *dev_priv = dev->dev_private;
820c1980 3819 struct ilk_wm_values *hw = &dev_priv->wm.hw;
243e6a44 3820 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4e0963c7
MR
3821 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3822 struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
243e6a44 3823 enum pipe pipe = intel_crtc->pipe;
f0f59a00 3824 static const i915_reg_t wm0_pipe_reg[] = {
243e6a44
VS
3825 [PIPE_A] = WM0_PIPEA_ILK,
3826 [PIPE_B] = WM0_PIPEB_ILK,
3827 [PIPE_C] = WM0_PIPEC_IVB,
3828 };
3829
3830 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
a42a5719 3831 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ce0e0713 3832 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
243e6a44 3833
3ef00284 3834 active->pipe_enabled = intel_crtc->active;
2a44b76b
VS
3835
3836 if (active->pipe_enabled) {
243e6a44
VS
3837 u32 tmp = hw->wm_pipe[pipe];
3838
3839 /*
3840 * For active pipes LP0 watermark is marked as
3841 * enabled, and LP1+ watermarks as disabled since
3842 * we can't really reverse compute them in case
3843 * multiple pipes are active.
3844 */
3845 active->wm[0].enable = true;
3846 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3847 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3848 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3849 active->linetime = hw->wm_linetime[pipe];
3850 } else {
3851 int level, max_level = ilk_wm_max_level(dev);
3852
3853 /*
3854 * For inactive pipes, all watermark levels
3855 * should be marked as enabled but zeroed,
3856 * which is what we'd compute them to.
3857 */
3858 for (level = 0; level <= max_level; level++)
3859 active->wm[level].enable = true;
3860 }
4e0963c7
MR
3861
3862 intel_crtc->wm.active.ilk = *active;
243e6a44
VS
3863}
3864
6eb1a681
VS
3865#define _FW_WM(value, plane) \
3866 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
3867#define _FW_WM_VLV(value, plane) \
3868 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
3869
3870static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
3871 struct vlv_wm_values *wm)
3872{
3873 enum pipe pipe;
3874 uint32_t tmp;
3875
3876 for_each_pipe(dev_priv, pipe) {
3877 tmp = I915_READ(VLV_DDL(pipe));
3878
3879 wm->ddl[pipe].primary =
3880 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3881 wm->ddl[pipe].cursor =
3882 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3883 wm->ddl[pipe].sprite[0] =
3884 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3885 wm->ddl[pipe].sprite[1] =
3886 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3887 }
3888
3889 tmp = I915_READ(DSPFW1);
3890 wm->sr.plane = _FW_WM(tmp, SR);
3891 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
3892 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
3893 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
3894
3895 tmp = I915_READ(DSPFW2);
3896 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
3897 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
3898 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
3899
3900 tmp = I915_READ(DSPFW3);
3901 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
3902
3903 if (IS_CHERRYVIEW(dev_priv)) {
3904 tmp = I915_READ(DSPFW7_CHV);
3905 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
3906 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
3907
3908 tmp = I915_READ(DSPFW8_CHV);
3909 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
3910 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
3911
3912 tmp = I915_READ(DSPFW9_CHV);
3913 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
3914 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
3915
3916 tmp = I915_READ(DSPHOWM);
3917 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3918 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
3919 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
3920 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
3921 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3922 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3923 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
3924 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3925 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3926 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
3927 } else {
3928 tmp = I915_READ(DSPFW7);
3929 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
3930 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
3931
3932 tmp = I915_READ(DSPHOWM);
3933 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3934 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3935 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3936 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
3937 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3938 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3939 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
3940 }
3941}
3942
3943#undef _FW_WM
3944#undef _FW_WM_VLV
3945
3946void vlv_wm_get_hw_state(struct drm_device *dev)
3947{
3948 struct drm_i915_private *dev_priv = to_i915(dev);
3949 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
3950 struct intel_plane *plane;
3951 enum pipe pipe;
3952 u32 val;
3953
3954 vlv_read_wm_values(dev_priv, wm);
3955
3956 for_each_intel_plane(dev, plane) {
3957 switch (plane->base.type) {
3958 int sprite;
3959 case DRM_PLANE_TYPE_CURSOR:
3960 plane->wm.fifo_size = 63;
3961 break;
3962 case DRM_PLANE_TYPE_PRIMARY:
3963 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
3964 break;
3965 case DRM_PLANE_TYPE_OVERLAY:
3966 sprite = plane->plane;
3967 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
3968 break;
3969 }
3970 }
3971
3972 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
3973 wm->level = VLV_WM_LEVEL_PM2;
3974
3975 if (IS_CHERRYVIEW(dev_priv)) {
3976 mutex_lock(&dev_priv->rps.hw_lock);
3977
3978 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
3979 if (val & DSP_MAXFIFO_PM5_ENABLE)
3980 wm->level = VLV_WM_LEVEL_PM5;
3981
58590c14
VS
3982 /*
3983 * If DDR DVFS is disabled in the BIOS, Punit
3984 * will never ack the request. So if that happens
3985 * assume we don't have to enable/disable DDR DVFS
3986 * dynamically. To test that just set the REQ_ACK
3987 * bit to poke the Punit, but don't change the
3988 * HIGH/LOW bits so that we don't actually change
3989 * the current state.
3990 */
6eb1a681 3991 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
58590c14
VS
3992 val |= FORCE_DDR_FREQ_REQ_ACK;
3993 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
3994
3995 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
3996 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
3997 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
3998 "assuming DDR DVFS is disabled\n");
3999 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4000 } else {
4001 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4002 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4003 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4004 }
6eb1a681
VS
4005
4006 mutex_unlock(&dev_priv->rps.hw_lock);
4007 }
4008
4009 for_each_pipe(dev_priv, pipe)
4010 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4011 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4012 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4013
4014 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4015 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4016}
4017
243e6a44
VS
4018void ilk_wm_get_hw_state(struct drm_device *dev)
4019{
4020 struct drm_i915_private *dev_priv = dev->dev_private;
820c1980 4021 struct ilk_wm_values *hw = &dev_priv->wm.hw;
243e6a44
VS
4022 struct drm_crtc *crtc;
4023
70e1e0ec 4024 for_each_crtc(dev, crtc)
243e6a44
VS
4025 ilk_pipe_wm_get_hw_state(crtc);
4026
4027 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4028 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4029 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4030
4031 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
cfa7698b
VS
4032 if (INTEL_INFO(dev)->gen >= 7) {
4033 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4034 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4035 }
243e6a44 4036
a42a5719 4037 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ac9545fd
VS
4038 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4039 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4040 else if (IS_IVYBRIDGE(dev))
4041 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4042 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
243e6a44
VS
4043
4044 hw->enable_fbc_wm =
4045 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4046}
4047
b445e3b0
ED
4048/**
4049 * intel_update_watermarks - update FIFO watermark values based on current modes
4050 *
4051 * Calculate watermark values for the various WM regs based on current mode
4052 * and plane configuration.
4053 *
4054 * There are several cases to deal with here:
4055 * - normal (i.e. non-self-refresh)
4056 * - self-refresh (SR) mode
4057 * - lines are large relative to FIFO size (buffer can hold up to 2)
4058 * - lines are small relative to FIFO size (buffer can hold more than 2
4059 * lines), so need to account for TLB latency
4060 *
4061 * The normal calculation is:
4062 * watermark = dotclock * bytes per pixel * latency
4063 * where latency is platform & configuration dependent (we assume pessimal
4064 * values here).
4065 *
4066 * The SR calculation is:
4067 * watermark = (trunc(latency/line time)+1) * surface width *
4068 * bytes per pixel
4069 * where
4070 * line time = htotal / dotclock
4071 * surface width = hdisplay for normal plane and 64 for cursor
4072 * and latency is assumed to be high, as above.
4073 *
4074 * The final value programmed to the register should always be rounded up,
4075 * and include an extra 2 entries to account for clock crossings.
4076 *
4077 * We don't use the sprite, so we can ignore that. And on Crestline we have
4078 * to set the non-SR watermarks to 8.
4079 */
46ba614c 4080void intel_update_watermarks(struct drm_crtc *crtc)
b445e3b0 4081{
46ba614c 4082 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
b445e3b0
ED
4083
4084 if (dev_priv->display.update_wm)
46ba614c 4085 dev_priv->display.update_wm(crtc);
b445e3b0
ED
4086}
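/*
 * Worked example of the "normal" formula documented above, using made-up
 * but plausible numbers: a 148.5 MHz dot clock, 4 bytes per pixel and a
 * 12 us latency give 148500000 * 4 * 0.000012 ~= 7128 bytes that must be
 * buffered across the latency window; the value actually programmed is
 * then rounded up and padded with the two extra entries for clock
 * crossings, as noted above.
 */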
4087
e2828914 4088/*
9270388e 4089 * Lock protecting IPS related data structures
9270388e
DV
4090 */
4091DEFINE_SPINLOCK(mchdev_lock);
4092
4093/* Global for IPS driver to get at the current i915 device. Protected by
4094 * mchdev_lock. */
4095static struct drm_i915_private *i915_mch_dev;
4096
2b4e57bd
ED
4097bool ironlake_set_drps(struct drm_device *dev, u8 val)
4098{
4099 struct drm_i915_private *dev_priv = dev->dev_private;
4100 u16 rgvswctl;
4101
9270388e
DV
4102 assert_spin_locked(&mchdev_lock);
4103
2b4e57bd
ED
4104 rgvswctl = I915_READ16(MEMSWCTL);
4105 if (rgvswctl & MEMCTL_CMD_STS) {
4106 DRM_DEBUG("gpu busy, RCS change rejected\n");
4107 return false; /* still busy with another command */
4108 }
4109
4110 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4111 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4112 I915_WRITE16(MEMSWCTL, rgvswctl);
4113 POSTING_READ16(MEMSWCTL);
4114
4115 rgvswctl |= MEMCTL_CMD_STS;
4116 I915_WRITE16(MEMSWCTL, rgvswctl);
4117
4118 return true;
4119}
4120
8090c6b9 4121static void ironlake_enable_drps(struct drm_device *dev)
2b4e57bd
ED
4122{
4123 struct drm_i915_private *dev_priv = dev->dev_private;
84f1b20f 4124 u32 rgvmodectl;
2b4e57bd
ED
4125 u8 fmax, fmin, fstart, vstart;
4126
9270388e
DV
4127 spin_lock_irq(&mchdev_lock);
4128
84f1b20f
TU
4129 rgvmodectl = I915_READ(MEMMODECTL);
4130
2b4e57bd
ED
4131 /* Enable temp reporting */
4132 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4133 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4134
4135 /* 100ms RC evaluation intervals */
4136 I915_WRITE(RCUPEI, 100000);
4137 I915_WRITE(RCDNEI, 100000);
4138
4139 /* Set max/min thresholds to 90ms and 80ms respectively */
4140 I915_WRITE(RCBMAXAVG, 90000);
4141 I915_WRITE(RCBMINAVG, 80000);
4142
4143 I915_WRITE(MEMIHYST, 1);
4144
4145 /* Set up min, max, and cur for interrupt handling */
4146 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4147 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4148 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4149 MEMMODE_FSTART_SHIFT;
4150
616847e7 4151 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
2b4e57bd
ED
4152 PXVFREQ_PX_SHIFT;
4153
20e4d407
DV
4154 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4155 dev_priv->ips.fstart = fstart;
2b4e57bd 4156
20e4d407
DV
4157 dev_priv->ips.max_delay = fstart;
4158 dev_priv->ips.min_delay = fmin;
4159 dev_priv->ips.cur_delay = fstart;
2b4e57bd
ED
4160
4161 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4162 fmax, fmin, fstart);
4163
4164 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4165
4166 /*
4167 * Interrupts will be enabled in ironlake_irq_postinstall
4168 */
4169
4170 I915_WRITE(VIDSTART, vstart);
4171 POSTING_READ(VIDSTART);
4172
4173 rgvmodectl |= MEMMODE_SWMODE_EN;
4174 I915_WRITE(MEMMODECTL, rgvmodectl);
4175
9270388e 4176 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2b4e57bd 4177 DRM_ERROR("stuck trying to change perf mode\n");
dd92d8de 4178 mdelay(1);
2b4e57bd
ED
4179
4180 ironlake_set_drps(dev, fstart);
4181
7d81c3e0
VS
4182 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4183 I915_READ(DDREC) + I915_READ(CSIEC);
20e4d407 4184 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
7d81c3e0 4185 dev_priv->ips.last_count2 = I915_READ(GFXEC);
5ed0bdf2 4186 dev_priv->ips.last_time2 = ktime_get_raw_ns();
9270388e
DV
4187
4188 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
4189}
4190
8090c6b9 4191static void ironlake_disable_drps(struct drm_device *dev)
2b4e57bd
ED
4192{
4193 struct drm_i915_private *dev_priv = dev->dev_private;
9270388e
DV
4194 u16 rgvswctl;
4195
4196 spin_lock_irq(&mchdev_lock);
4197
4198 rgvswctl = I915_READ16(MEMSWCTL);
2b4e57bd
ED
4199
4200 /* Ack interrupts, disable EFC interrupt */
4201 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4202 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4203 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4204 I915_WRITE(DEIIR, DE_PCU_EVENT);
4205 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4206
4207 /* Go back to the starting frequency */
20e4d407 4208 ironlake_set_drps(dev, dev_priv->ips.fstart);
dd92d8de 4209 mdelay(1);
2b4e57bd
ED
4210 rgvswctl |= MEMCTL_CMD_STS;
4211 I915_WRITE(MEMSWCTL, rgvswctl);
dd92d8de 4212 mdelay(1);
2b4e57bd 4213
9270388e 4214 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
4215}
4216
acbe9475
DV
4217/* There's a funny hw issue where the hw returns all 0 when reading from
4218 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4219 * ourselves, instead of doing a rmw cycle (which might result in us clearing
4220 * all limits and the gpu being stuck at whatever frequency it is currently at).
4221 */
74ef1173 4222static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
2b4e57bd 4223{
7b9e0ae6 4224 u32 limits;
2b4e57bd 4225
20b46e59
DV
4226 /* Only set the down limit when we've reached the lowest level to avoid
4227 * getting more interrupts, otherwise leave this clear. This prevents a
4228 * race in the hw when coming out of rc6: There's a tiny window where
4229 * the hw runs at the minimal clock before selecting the desired
4230 * frequency; if the down threshold expires in that window we will not
4231 * receive a down interrupt. */
74ef1173
AG
4232 if (IS_GEN9(dev_priv->dev)) {
4233 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4234 if (val <= dev_priv->rps.min_freq_softlimit)
4235 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4236 } else {
4237 limits = dev_priv->rps.max_freq_softlimit << 24;
4238 if (val <= dev_priv->rps.min_freq_softlimit)
4239 limits |= dev_priv->rps.min_freq_softlimit << 16;
4240 }
20b46e59
DV
4241
4242 return limits;
4243}
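/*
 * Packing example for the limits value above (hypothetical softlimits):
 * on pre-gen9, max_freq_softlimit = 0x20 yields 0x20 << 24; only once the
 * requested val has dropped to min_freq_softlimit (say 0x0b) is
 * 0x0b << 16 OR'ed in, so the down limit is armed solely at the lowest
 * frequency, matching the interrupt-avoidance note above.
 */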
4244
dd75fdc8
CW
4245static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4246{
4247 int new_power;
8a586437
AG
4248 u32 threshold_up = 0, threshold_down = 0; /* in % */
4249 u32 ei_up = 0, ei_down = 0;
dd75fdc8
CW
4250
4251 new_power = dev_priv->rps.power;
4252 switch (dev_priv->rps.power) {
4253 case LOW_POWER:
b39fb297 4254 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
dd75fdc8
CW
4255 new_power = BETWEEN;
4256 break;
4257
4258 case BETWEEN:
b39fb297 4259 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
dd75fdc8 4260 new_power = LOW_POWER;
b39fb297 4261 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
dd75fdc8
CW
4262 new_power = HIGH_POWER;
4263 break;
4264
4265 case HIGH_POWER:
b39fb297 4266 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
dd75fdc8
CW
4267 new_power = BETWEEN;
4268 break;
4269 }
4270 /* Max/min bins are special */
aed242ff 4271 if (val <= dev_priv->rps.min_freq_softlimit)
dd75fdc8 4272 new_power = LOW_POWER;
aed242ff 4273 if (val >= dev_priv->rps.max_freq_softlimit)
dd75fdc8
CW
4274 new_power = HIGH_POWER;
4275 if (new_power == dev_priv->rps.power)
4276 return;
4277
4278 /* Note the units here are not exactly 1us, but 1280ns. */
4279 switch (new_power) {
4280 case LOW_POWER:
4281 /* Upclock if more than 95% busy over 16ms */
8a586437
AG
4282 ei_up = 16000;
4283 threshold_up = 95;
dd75fdc8
CW
4284
4285 /* Downclock if less than 85% busy over 32ms */
8a586437
AG
4286 ei_down = 32000;
4287 threshold_down = 85;
dd75fdc8
CW
4288 break;
4289
4290 case BETWEEN:
4291 /* Upclock if more than 90% busy over 13ms */
8a586437
AG
4292 ei_up = 13000;
4293 threshold_up = 90;
dd75fdc8
CW
4294
4295 /* Downclock if less than 75% busy over 32ms */
8a586437
AG
4296 ei_down = 32000;
4297 threshold_down = 75;
dd75fdc8
CW
4298 break;
4299
4300 case HIGH_POWER:
4301 /* Upclock if more than 85% busy over 10ms */
8a586437
AG
4302 ei_up = 10000;
4303 threshold_up = 85;
dd75fdc8
CW
4304
4305 /* Downclock if less than 60% busy over 32ms */
8a586437
AG
4306 ei_down = 32000;
4307 threshold_down = 60;
dd75fdc8
CW
4308 break;
4309 }
4310
8a586437
AG
4311 I915_WRITE(GEN6_RP_UP_EI,
4312 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4313 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4314 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
4315
4316 I915_WRITE(GEN6_RP_DOWN_EI,
4317 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4318 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4319 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
4320
4321 I915_WRITE(GEN6_RP_CONTROL,
4322 GEN6_RP_MEDIA_TURBO |
4323 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4324 GEN6_RP_MEDIA_IS_GFX |
4325 GEN6_RP_ENABLE |
4326 GEN6_RP_UP_BUSY_AVG |
4327 GEN6_RP_DOWN_IDLE_AVG);
4328
dd75fdc8 4329 dev_priv->rps.power = new_power;
8fb55197
CW
4330 dev_priv->rps.up_threshold = threshold_up;
4331 dev_priv->rps.down_threshold = threshold_down;
dd75fdc8
CW
4332 dev_priv->rps.last_adj = 0;
4333}
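/*
 * Example of the thresholds computed above, taking the LOW_POWER numbers:
 * ei_up = 16000 us and threshold_up = 95 mean the GPU must be busy for at
 * least 16000 * 95 / 100 = 15200 us of the 16 ms evaluation window before
 * an up event is signalled; both figures are converted to hardware
 * intervals via GT_INTERVAL_FROM_US before being written.
 */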
4334
2876ce73
CW
4335static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4336{
4337 u32 mask = 0;
4338
4339 if (val > dev_priv->rps.min_freq_softlimit)
6f4b12f8 4340 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
2876ce73 4341 if (val < dev_priv->rps.max_freq_softlimit)
6f4b12f8 4342 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
2876ce73 4343
7b3c29f6
CW
4344 mask &= dev_priv->pm_rps_events;
4345
59d02a1f 4346 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
2876ce73
CW
4347}
4348
b8a5ff8d
JM
4349/* gen6_set_rps is called to update the frequency request, but should also be
4350 * called when the range (min_delay and max_delay) is modified so that we can
4351 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
ffe02b40 4352static void gen6_set_rps(struct drm_device *dev, u8 val)
20b46e59
DV
4353{
4354 struct drm_i915_private *dev_priv = dev->dev_private;
7b9e0ae6 4355
23eafea6 4356 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
e87a005d 4357 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
23eafea6
SAK
4358 return;
4359
4fc688ce 4360 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
aed242ff
CW
4361 WARN_ON(val > dev_priv->rps.max_freq);
4362 WARN_ON(val < dev_priv->rps.min_freq);
004777cb 4363
eb64cad1
CW
4364 /* min/max delay may still have been modified so be sure to
4365 * write the limits value.
4366 */
4367 if (val != dev_priv->rps.cur_freq) {
4368 gen6_set_rps_thresholds(dev_priv, val);
b8a5ff8d 4369
5704195c
AG
4370 if (IS_GEN9(dev))
4371 I915_WRITE(GEN6_RPNSWREQ,
4372 GEN9_FREQUENCY(val));
4373 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
eb64cad1
CW
4374 I915_WRITE(GEN6_RPNSWREQ,
4375 HSW_FREQUENCY(val));
4376 else
4377 I915_WRITE(GEN6_RPNSWREQ,
4378 GEN6_FREQUENCY(val) |
4379 GEN6_OFFSET(0) |
4380 GEN6_AGGRESSIVE_TURBO);
b8a5ff8d 4381 }
7b9e0ae6 4382
7b9e0ae6
CW
4383 /* Make sure we continue to get interrupts
4384 * until we hit the minimum or maximum frequencies.
4385 */
74ef1173 4386 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
2876ce73 4387 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
7b9e0ae6 4388
d5570a72
BW
4389 POSTING_READ(GEN6_RPNSWREQ);
4390
b39fb297 4391 dev_priv->rps.cur_freq = val;
0f94592e 4392 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
2b4e57bd
ED
4393}
4394
ffe02b40
VS
4395static void valleyview_set_rps(struct drm_device *dev, u8 val)
4396{
4397 struct drm_i915_private *dev_priv = dev->dev_private;
4398
4399 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
aed242ff
CW
4400 WARN_ON(val > dev_priv->rps.max_freq);
4401 WARN_ON(val < dev_priv->rps.min_freq);
ffe02b40
VS
4402
4403 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
4404 "Odd GPU freq value\n"))
4405 val &= ~1;
4406
cd25dd5b
D
4407 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4408
8fb55197 4409 if (val != dev_priv->rps.cur_freq) {
ffe02b40 4410 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
8fb55197
CW
4411 if (!IS_CHERRYVIEW(dev_priv))
4412 gen6_set_rps_thresholds(dev_priv, val);
4413 }
ffe02b40 4414
ffe02b40
VS
4415 dev_priv->rps.cur_freq = val;
4416 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4417}
4418
a7f6e231 4419/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
76c3552f
D
4420 *
4421 * If Gfx is idle, then
a7f6e231
D
4422 * 1. Forcewake Media well.
4423 * 2. Request idle freq.
4424 * 3. Release Forcewake of Media well.
76c3552f
D
4425*/
4426static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4427{
aed242ff 4428 u32 val = dev_priv->rps.idle_freq;
5549d25f 4429
aed242ff 4430 if (dev_priv->rps.cur_freq <= val)
76c3552f
D
4431 return;
4432
a7f6e231
D
4433 /* Wake up the media well, as that takes a lot less
4434 * power than the Render well. */
4435 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4436 valleyview_set_rps(dev_priv->dev, val);
4437 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
76c3552f
D
4438}
4439
43cf3bf0
CW
4440void gen6_rps_busy(struct drm_i915_private *dev_priv)
4441{
4442 mutex_lock(&dev_priv->rps.hw_lock);
4443 if (dev_priv->rps.enabled) {
4444 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4445 gen6_rps_reset_ei(dev_priv);
4446 I915_WRITE(GEN6_PMINTRMSK,
4447 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4448 }
4449 mutex_unlock(&dev_priv->rps.hw_lock);
4450}
4451
b29c19b6
CW
4452void gen6_rps_idle(struct drm_i915_private *dev_priv)
4453{
691bb717
DL
4454 struct drm_device *dev = dev_priv->dev;
4455
b29c19b6 4456 mutex_lock(&dev_priv->rps.hw_lock);
c0951f0c 4457 if (dev_priv->rps.enabled) {
666a4537 4458 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
76c3552f 4459 vlv_set_rps_idle(dev_priv);
7526ed79 4460 else
aed242ff 4461 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
c0951f0c 4462 dev_priv->rps.last_adj = 0;
43cf3bf0 4463 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
c0951f0c 4464 }
8d3afd7d 4465 mutex_unlock(&dev_priv->rps.hw_lock);
1854d5ca 4466
8d3afd7d 4467 spin_lock(&dev_priv->rps.client_lock);
1854d5ca
CW
4468 while (!list_empty(&dev_priv->rps.clients))
4469 list_del_init(dev_priv->rps.clients.next);
8d3afd7d 4470 spin_unlock(&dev_priv->rps.client_lock);
b29c19b6
CW
4471}
4472
1854d5ca 4473void gen6_rps_boost(struct drm_i915_private *dev_priv,
e61b9958
CW
4474 struct intel_rps_client *rps,
4475 unsigned long submitted)
b29c19b6 4476{
8d3afd7d
CW
4477 /* This is intentionally racy! We peek at the state here, then
4478 * validate inside the RPS worker.
4479 */
4480 if (!(dev_priv->mm.busy &&
4481 dev_priv->rps.enabled &&
4482 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
4483 return;
43cf3bf0 4484
e61b9958
CW
4485 /* Force a RPS boost (and don't count it against the client) if
4486 * the GPU is severely congested.
4487 */
d0bc54f2 4488 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
e61b9958
CW
4489 rps = NULL;
4490
8d3afd7d
CW
4491 spin_lock(&dev_priv->rps.client_lock);
4492 if (rps == NULL || list_empty(&rps->link)) {
4493 spin_lock_irq(&dev_priv->irq_lock);
4494 if (dev_priv->rps.interrupts_enabled) {
4495 dev_priv->rps.client_boost = true;
4496 queue_work(dev_priv->wq, &dev_priv->rps.work);
4497 }
4498 spin_unlock_irq(&dev_priv->irq_lock);
1854d5ca 4499
2e1b8730
CW
4500 if (rps != NULL) {
4501 list_add(&rps->link, &dev_priv->rps.clients);
4502 rps->boosts++;
1854d5ca
CW
4503 } else
4504 dev_priv->rps.boosts++;
c0951f0c 4505 }
8d3afd7d 4506 spin_unlock(&dev_priv->rps.client_lock);
b29c19b6
CW
4507}
4508
ffe02b40 4509void intel_set_rps(struct drm_device *dev, u8 val)
0a073b84 4510{
666a4537 4511 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
ffe02b40
VS
4512 valleyview_set_rps(dev, val);
4513 else
4514 gen6_set_rps(dev, val);
0a073b84
JB
4515}
4516
20e49366
ZW
4517static void gen9_disable_rps(struct drm_device *dev)
4518{
4519 struct drm_i915_private *dev_priv = dev->dev_private;
4520
4521 I915_WRITE(GEN6_RC_CONTROL, 0);
38c23527 4522 I915_WRITE(GEN9_PG_ENABLE, 0);
20e49366
ZW
4523}
4524
44fc7d5c 4525static void gen6_disable_rps(struct drm_device *dev)
d20d4f0c
JB
4526{
4527 struct drm_i915_private *dev_priv = dev->dev_private;
4528
4529 I915_WRITE(GEN6_RC_CONTROL, 0);
44fc7d5c 4530 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
44fc7d5c
DV
4531}
4532
38807746
D
4533static void cherryview_disable_rps(struct drm_device *dev)
4534{
4535 struct drm_i915_private *dev_priv = dev->dev_private;
4536
4537 I915_WRITE(GEN6_RC_CONTROL, 0);
4538}
4539
44fc7d5c
DV
4540static void valleyview_disable_rps(struct drm_device *dev)
4541{
4542 struct drm_i915_private *dev_priv = dev->dev_private;
4543
98a2e5f9
D
4544 /* we're doing forcewake before disabling RC6,
4545 * this is what the BIOS expects when going into suspend */
59bad947 4546 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
98a2e5f9 4547
44fc7d5c 4548 I915_WRITE(GEN6_RC_CONTROL, 0);
d20d4f0c 4549
59bad947 4550 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d20d4f0c
JB
4551}
4552
dc39fff7
BW
4553static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4554{
666a4537 4555 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
91ca689a
ID
4556 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4557 mode = GEN6_RC_CTL_RC6_ENABLE;
4558 else
4559 mode = 0;
4560 }
58abf1da
RV
4561 if (HAS_RC6p(dev))
4562 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
87ad3212
JN
4563 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
4564 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
4565 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
58abf1da
RV
4566
4567 else
4568 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
87ad3212 4569 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
dc39fff7
BW
4570}
4571
274008e8
SAK
4572static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
4573{
4574 struct drm_i915_private *dev_priv = dev->dev_private;
4575 bool enable_rc6 = true;
4576 unsigned long rc6_ctx_base;
4577
4578 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
4579 DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
4580 enable_rc6 = false;
4581 }
4582
4583 /*
4584 * The exact context size is not known for BXT, so assume a page size
4585 * for this check.
4586 */
4587 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
4588 if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
4589 (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
4590 dev_priv->gtt.stolen_reserved_size))) {
4591 DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
4592 enable_rc6 = false;
4593 }
4594
4595 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
4596 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
4597 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
4598 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
4599 DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
4600 enable_rc6 = false;
4601 }
4602
4603 if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
4604 GEN6_RC_CTL_HW_ENABLE)) &&
4605 ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
4606 !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
4607 DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
4608 enable_rc6 = false;
4609 }
4610
4611 return enable_rc6;
4612}
4613
4614int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
2b4e57bd 4615{
e7d66d89
DV
4616 /* No RC6 before Ironlake and code is gone for ilk. */
4617 if (INTEL_INFO(dev)->gen < 6)
e6069ca8
ID
4618 return 0;
4619
274008e8
SAK
4620 if (!enable_rc6)
4621 return 0;
4622
4623 if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
4624 DRM_INFO("RC6 disabled by BIOS\n");
4625 return 0;
4626 }
4627
456470eb 4628 /* Respect the kernel parameter if it is set */
e6069ca8
ID
4629 if (enable_rc6 >= 0) {
4630 int mask;
4631
58abf1da 4632 if (HAS_RC6p(dev))
e6069ca8
ID
4633 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4634 INTEL_RC6pp_ENABLE;
4635 else
4636 mask = INTEL_RC6_ENABLE;
4637
4638 if ((enable_rc6 & mask) != enable_rc6)
8dfd1f04
DV
4639 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4640 enable_rc6 & mask, enable_rc6, mask);
e6069ca8
ID
4641
4642 return enable_rc6 & mask;
4643 }
2b4e57bd 4644
8bade1ad 4645 if (IS_IVYBRIDGE(dev))
cca84a1f 4646 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8bade1ad
BW
4647
4648 return INTEL_RC6_ENABLE;
2b4e57bd
ED
4649}
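/*
 * Example of the parameter handling above (hypothetical request):
 * i915.enable_rc6=7 asks for RC6 | RC6p | RC6pp; on a part without RC6p
 * the valid mask is just INTEL_RC6_ENABLE, so the request is trimmed to 1
 * and a debug message reports the adjustment, while enable_rc6=0 keeps
 * RC6 off entirely.
 */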
4650
e6069ca8
ID
4651int intel_enable_rc6(const struct drm_device *dev)
4652{
4653 return i915.enable_rc6;
4654}
4655
93ee2920 4656static void gen6_init_rps_frequencies(struct drm_device *dev)
3280e8b0 4657{
93ee2920
TR
4658 struct drm_i915_private *dev_priv = dev->dev_private;
4659 uint32_t rp_state_cap;
4660 u32 ddcc_status = 0;
4661 int ret;
4662
3280e8b0
BW
4663 /* All of these values are in units of 50MHz */
4664 dev_priv->rps.cur_freq = 0;
93ee2920 4665 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
35040562
BP
4666 if (IS_BROXTON(dev)) {
4667 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4668 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4669 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4670 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
4671 } else {
4672 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4673 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4674 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4675 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4676 }
4677
3280e8b0
BW
4678 /* hw_max = RP0 until we check for overclocking */
4679 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4680
93ee2920 4681 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
ef11bdb3
RV
4682 if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
4683 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
93ee2920
TR
4684 ret = sandybridge_pcode_read(dev_priv,
4685 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4686 &ddcc_status);
4687 if (0 == ret)
4688 dev_priv->rps.efficient_freq =
46efa4ab
TR
4689 clamp_t(u8,
4690 ((ddcc_status >> 8) & 0xff),
4691 dev_priv->rps.min_freq,
4692 dev_priv->rps.max_freq);
93ee2920
TR
4693 }
4694
ef11bdb3 4695 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
c5e0688c
AG
4696 /* Store the frequency values in 16.66 MHz units, which is
4697 the natural hardware unit for SKL */
4698 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4699 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
4700 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
4701 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
4702 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
4703 }
4704
aed242ff
CW
4705 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4706
3280e8b0
BW
4707 /* Preserve min/max settings in case of re-init */
4708 if (dev_priv->rps.max_freq_softlimit == 0)
4709 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4710
93ee2920
TR
4711 if (dev_priv->rps.min_freq_softlimit == 0) {
4712 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4713 dev_priv->rps.min_freq_softlimit =
813b5e69
VS
4714 max_t(int, dev_priv->rps.efficient_freq,
4715 intel_freq_opcode(dev_priv, 450));
93ee2920
TR
4716 else
4717 dev_priv->rps.min_freq_softlimit =
4718 dev_priv->rps.min_freq;
4719 }
3280e8b0
BW
4720}
4721
b6fef0ef 4722/* See the Gen9_GT_PM_Programming_Guide doc for the below */
20e49366 4723static void gen9_enable_rps(struct drm_device *dev)
b6fef0ef
JB
4724{
4725 struct drm_i915_private *dev_priv = dev->dev_private;
4726
4727 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4728
ba1c554c
DL
4729 gen6_init_rps_frequencies(dev);
4730
23eafea6 4731 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
e87a005d 4732 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
23eafea6
SAK
4733 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4734 return;
4735 }
4736
0beb059a
AG
4737 /* Program defaults and thresholds for RPS*/
4738 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4739 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
4740
4741 /* 1 second timeout*/
4742 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
4743 GT_INTERVAL_FROM_US(dev_priv, 1000000));
4744
b6fef0ef 4745 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
b6fef0ef 4746
0beb059a
AG
4747 /* Leaning on the below call to gen6_set_rps to program/setup the
4748 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4749 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4750 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4751 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
b6fef0ef
JB
4752
4753 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4754}
4755
4756static void gen9_enable_rc6(struct drm_device *dev)
20e49366
ZW
4757{
4758 struct drm_i915_private *dev_priv = dev->dev_private;
4759 struct intel_engine_cs *ring;
4760 uint32_t rc6_mask = 0;
4761 int unused;
4762
4763 /* 1a: Software RC state - RC0 */
4764 I915_WRITE(GEN6_RC_STATE, 0);
4765
4766 /* 1b: Get forcewake during program sequence. Although the driver
4767 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
59bad947 4768 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
20e49366
ZW
4769
4770 /* 2a: Disable RC states. */
4771 I915_WRITE(GEN6_RC_CONTROL, 0);
4772
4773 /* 2b: Program RC6 thresholds.*/
63a4dec2
SAK
4774
4775 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
e7674b8c 4776 if (IS_SKYLAKE(dev))
63a4dec2
SAK
4777 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
4778 else
4779 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
20e49366
ZW
4780 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4781 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4782 for_each_ring(ring, dev_priv, unused)
4783 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
97c322e7
SAK
4784
4785 if (HAS_GUC_UCODE(dev))
4786 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
4787
20e49366 4788 I915_WRITE(GEN6_RC_SLEEP, 0);
20e49366 4789
38c23527
ZW
4790 /* 2c: Program Coarse Power Gating Policies. */
4791 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
4792 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4793
20e49366
ZW
4794 /* 3a: Enable RC6 */
4795 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4796 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
87ad3212 4797 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
3e7732a0 4798 /* WaRsUseTimeoutMode */
e87a005d 4799 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
cbdc12a9 4800 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
3e7732a0 4801 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
e3429cd2
SAK
4802 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4803 GEN7_RC_CTL_TO_MODE |
4804 rc6_mask);
3e7732a0
SAK
4805 } else {
4806 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
e3429cd2
SAK
4807 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4808 GEN6_RC_CTL_EI_MODE(1) |
4809 rc6_mask);
3e7732a0 4810 }
20e49366 4811
cb07bae0
SK
4812 /*
4813 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
f2d2fe95 4814 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
cb07bae0 4815 */
06e668ac 4816 if (NEEDS_WaRsDisableCoarsePowerGating(dev))
f2d2fe95
SAK
4817 I915_WRITE(GEN9_PG_ENABLE, 0);
4818 else
4819 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4820 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
38c23527 4821
59bad947 4822 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
20e49366
ZW
4823
4824}
4825
6edee7f3
BW
4826static void gen8_enable_rps(struct drm_device *dev)
4827{
4828 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 4829 struct intel_engine_cs *ring;
93ee2920 4830 uint32_t rc6_mask = 0;
6edee7f3
BW
4831 int unused;
4832
4833 /* 1a: Software RC state - RC0 */
4834 I915_WRITE(GEN6_RC_STATE, 0);
4835
4836 /* 1c & 1d: Get forcewake during program sequence. Although the driver
4837 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
59bad947 4838 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6edee7f3
BW
4839
4840 /* 2a: Disable RC states. */
4841 I915_WRITE(GEN6_RC_CONTROL, 0);
4842
93ee2920
TR
4843 /* Initialize rps frequencies */
4844 gen6_init_rps_frequencies(dev);
6edee7f3
BW
4845
4846 /* 2b: Program RC6 thresholds.*/
4847 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4848 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4849 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4850 for_each_ring(ring, dev_priv, unused)
4851 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4852 I915_WRITE(GEN6_RC_SLEEP, 0);
0d68b25e
TR
4853 if (IS_BROADWELL(dev))
4854 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4855 else
4856 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
6edee7f3
BW
4857
4858 /* 3: Enable RC6 */
4859 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4860 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
abbf9d2c 4861 intel_print_rc6_info(dev, rc6_mask);
0d68b25e
TR
4862 if (IS_BROADWELL(dev))
4863 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4864 GEN7_RC_CTL_TO_MODE |
4865 rc6_mask);
4866 else
4867 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4868 GEN6_RC_CTL_EI_MODE(1) |
4869 rc6_mask);
6edee7f3
BW
4870
4871 /* 4 Program defaults and thresholds for RPS*/
f9bdc585
BW
4872 I915_WRITE(GEN6_RPNSWREQ,
4873 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4874 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4875 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
7526ed79
DV
4876 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4877 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
4878
4879 /* Docs recommend 900MHz, and 300 MHz respectively */
4880 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4881 dev_priv->rps.max_freq_softlimit << 24 |
4882 dev_priv->rps.min_freq_softlimit << 16);
4883
4884 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4885 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
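	/*
	 * A note on the arithmetic above (an inference from the values, not
	 * from the docs): these RP registers tick in 1.28 us units, and
	 * 1.28 us = 128 * 10 ns, so dividing a time expressed in 10 ns units
	 * by 128 yields the register value, e.g. 100000000 * 10 ns = 1 s for
	 * the down timeout and 7600000 * 10 ns = 76 ms for the up threshold.
	 */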
4886 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4887 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
4888
4889 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6edee7f3
BW
4890
4891 /* 5: Enable RPS */
7526ed79
DV
4892 I915_WRITE(GEN6_RP_CONTROL,
4893 GEN6_RP_MEDIA_TURBO |
4894 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4895 GEN6_RP_MEDIA_IS_GFX |
4896 GEN6_RP_ENABLE |
4897 GEN6_RP_UP_BUSY_AVG |
4898 GEN6_RP_DOWN_IDLE_AVG);
4899
4900 /* 6: Ring frequency + overclocking (our driver does this later) */

4901
c7f3153a 4902 dev_priv->rps.power = HIGH_POWER; /* force a reset */
aed242ff 4903 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
7526ed79 4904
59bad947 4905 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6edee7f3
BW
4906}
4907
79f5b2c7 4908static void gen6_enable_rps(struct drm_device *dev)
2b4e57bd 4909{
79f5b2c7 4910 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 4911 struct intel_engine_cs *ring;
d060c169 4912 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
2b4e57bd 4913 u32 gtfifodbg;
2b4e57bd 4914 int rc6_mode;
42c0526c 4915 int i, ret;
2b4e57bd 4916
4fc688ce 4917 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 4918
2b4e57bd
ED
4919 /* Here begins a magic sequence of register writes to enable
4920 * auto-downclocking.
4921 *
4922 * Perhaps there might be some value in exposing these to
4923 * userspace...
4924 */
4925 I915_WRITE(GEN6_RC_STATE, 0);
2b4e57bd
ED
4926
4927 /* Clear the DBG now so we don't confuse earlier errors */
4928 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4929 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4930 I915_WRITE(GTFIFODBG, gtfifodbg);
4931 }
4932
59bad947 4933 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2b4e57bd 4934
93ee2920
TR
4935 /* Initialize rps frequencies */
4936 gen6_init_rps_frequencies(dev);
dd0a1aa1 4937
2b4e57bd
ED
4938 /* disable the counters and set deterministic thresholds */
4939 I915_WRITE(GEN6_RC_CONTROL, 0);
4940
4941 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4942 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4943 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4944 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4945 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4946
b4519513
CW
4947 for_each_ring(ring, dev_priv, i)
4948 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2b4e57bd
ED
4949
4950 I915_WRITE(GEN6_RC_SLEEP, 0);
4951 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
29c78f60 4952 if (IS_IVYBRIDGE(dev))
351aa566
SM
4953 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
4954 else
4955 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
0920a487 4956 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
2b4e57bd
ED
4957 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4958
5a7dc92a 4959 /* Check if we are enabling RC6 */
2b4e57bd
ED
4960 rc6_mode = intel_enable_rc6(dev_priv->dev);
4961 if (rc6_mode & INTEL_RC6_ENABLE)
4962 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4963
5a7dc92a
ED
4964 /* We don't use those on Haswell */
4965 if (!IS_HASWELL(dev)) {
4966 if (rc6_mode & INTEL_RC6p_ENABLE)
4967 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2b4e57bd 4968
5a7dc92a
ED
4969 if (rc6_mode & INTEL_RC6pp_ENABLE)
4970 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4971 }
2b4e57bd 4972
dc39fff7 4973 intel_print_rc6_info(dev, rc6_mask);
2b4e57bd
ED
4974
4975 I915_WRITE(GEN6_RC_CONTROL,
4976 rc6_mask |
4977 GEN6_RC_CTL_EI_MODE(1) |
4978 GEN6_RC_CTL_HW_ENABLE);
4979
dd75fdc8
CW
4980 /* Power down if completely idle for over 50ms */
4981 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
2b4e57bd 4982 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2b4e57bd 4983
42c0526c 4984 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
d060c169 4985 if (ret)
42c0526c 4986 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
d060c169
BW
4987
4988 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4989 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4990 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
b39fb297 4991 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
d060c169 4992 (pcu_mbox & 0xff) * 50);
b39fb297 4993 dev_priv->rps.max_freq = pcu_mbox & 0xff;
2b4e57bd
ED
4994 }
4995
dd75fdc8 4996 dev_priv->rps.power = HIGH_POWER; /* force a reset */
aed242ff 4997 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
2b4e57bd 4998
31643d54
BW
4999 rc6vids = 0;
5000 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5001 if (IS_GEN6(dev) && ret) {
5002 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5003 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5004 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5005 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5006 rc6vids &= 0xffff00;
5007 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5008 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5009 if (ret)
5010 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5011 }
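	/*
	 * i.e. on gen6 the low byte of the RC6VIDS mailbox holds the RC6
	 * voltage ID; if the BIOS left it encoding less than the 450 mV
	 * minimum, that byte is rewritten with the encoding of 450 mV (a
	 * restatement of the code above).
	 */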
5012
59bad947 5013 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2b4e57bd
ED
5014}
5015
c2bc2fc5 5016static void __gen6_update_ring_freq(struct drm_device *dev)
2b4e57bd 5017{
79f5b2c7 5018 struct drm_i915_private *dev_priv = dev->dev_private;
2b4e57bd 5019 int min_freq = 15;
3ebecd07
CW
5020 unsigned int gpu_freq;
5021 unsigned int max_ia_freq, min_ring_freq;
4c8c7743 5022 unsigned int max_gpu_freq, min_gpu_freq;
2b4e57bd 5023 int scaling_factor = 180;
eda79642 5024 struct cpufreq_policy *policy;
2b4e57bd 5025
4fc688ce 5026 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 5027
eda79642
BW
5028 policy = cpufreq_cpu_get(0);
5029 if (policy) {
5030 max_ia_freq = policy->cpuinfo.max_freq;
5031 cpufreq_cpu_put(policy);
5032 } else {
5033 /*
5034 * Default to measured freq if none found, PCU will ensure we
5035 * don't go over
5036 */
2b4e57bd 5037 max_ia_freq = tsc_khz;
eda79642 5038 }
2b4e57bd
ED
5039
5040 /* Convert from kHz to MHz */
5041 max_ia_freq /= 1000;
5042
153b4b95 5043 min_ring_freq = I915_READ(DCLK) & 0xf;
f6aca45c
BW
5044 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5045 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
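	/* 266.67 MHz is 800/3 MHz, so scaling by 8/3 presumably re-expresses
	 * the DCLK fuse field in 100 MHz units for use as the ring frequency
	 * floor in the loop below. */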
3ebecd07 5046
ef11bdb3 5047 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
4c8c7743
AG
5048 /* Convert GT frequency to 50 MHz units */
5049 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5050 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5051 } else {
5052 min_gpu_freq = dev_priv->rps.min_freq;
5053 max_gpu_freq = dev_priv->rps.max_freq;
5054 }
5055
2b4e57bd
ED
5056 /*
5057 * For each potential GPU frequency, load a ring frequency we'd like
5058 * to use for memory access. We do this by specifying the IA frequency
5059 * the PCU should use as a reference to determine the ring frequency.
5060 */
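	/*
	 * Rough worked example of the legacy (pre-HSW) branch below, under
	 * assumed numbers: with max_ia_freq = 3400 MHz, scaling_factor = 180
	 * and diff = 10 steps down from the top, ia_freq = 3400 -
	 * (10 * 180) / 2 = 2500, and DIV_ROUND_CLOSEST(2500, 100) = 25 in
	 * 100 MHz units.
	 */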
4c8c7743
AG
5061 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5062 int diff = max_gpu_freq - gpu_freq;
3ebecd07
CW
5063 unsigned int ia_freq = 0, ring_freq = 0;
5064
ef11bdb3 5065 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
4c8c7743
AG
5066 /*
5067 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5068 * No floor required for ring frequency on SKL.
5069 */
5070 ring_freq = gpu_freq;
5071 } else if (INTEL_INFO(dev)->gen >= 8) {
46c764d4
BW
5072 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5073 ring_freq = max(min_ring_freq, gpu_freq);
5074 } else if (IS_HASWELL(dev)) {
f6aca45c 5075 ring_freq = mult_frac(gpu_freq, 5, 4);
3ebecd07
CW
5076 ring_freq = max(min_ring_freq, ring_freq);
5077 /* leave ia_freq as the default, chosen by cpufreq */
5078 } else {
5079 /* On older processors, there is no separate ring
5080 * clock domain, so in order to boost the bandwidth
5081 * of the ring, we need to upclock the CPU (ia_freq).
5082 *
5083 * For GPU frequencies less than 750MHz,
5084 * just use the lowest ring freq.
5085 */
5086 if (gpu_freq < min_freq)
5087 ia_freq = 800;
5088 else
5089 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5090 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5091 }
2b4e57bd 5092
42c0526c
BW
5093 sandybridge_pcode_write(dev_priv,
5094 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3ebecd07
CW
5095 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5096 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5097 gpu_freq);
2b4e57bd 5098 }
2b4e57bd
ED
5099}
5100
c2bc2fc5
ID
5101void gen6_update_ring_freq(struct drm_device *dev)
5102{
5103 struct drm_i915_private *dev_priv = dev->dev_private;
5104
97d3308a 5105 if (!HAS_CORE_RING_FREQ(dev))
c2bc2fc5
ID
5106 return;
5107
5108 mutex_lock(&dev_priv->rps.hw_lock);
5109 __gen6_update_ring_freq(dev);
5110 mutex_unlock(&dev_priv->rps.hw_lock);
5111}
5112
03af2045 5113static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
2b6b3a09 5114{
095acd5f 5115 struct drm_device *dev = dev_priv->dev;
2b6b3a09
D
5116 u32 val, rp0;
5117
5b5929cb 5118 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
2b6b3a09 5119
5b5929cb
JN
5120 switch (INTEL_INFO(dev)->eu_total) {
5121 case 8:
5122 /* (2 * 4) config */
5123 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5124 break;
5125 case 12:
5126 /* (2 * 6) config */
5127 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5128 break;
5129 case 16:
5130 /* (2 * 8) config */
5131 default:
5132 /* Setting (2 * 8) Min RP0 for any other combination */
5133 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5134 break;
095acd5f 5135 }
5b5929cb
JN
5136
5137 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5138
2b6b3a09
D
5139 return rp0;
5140}
5141
5142static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5143{
5144 u32 val, rpe;
5145
5146 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5147 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5148
5149 return rpe;
5150}
5151
7707df4a
D
5152static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5153{
5154 u32 val, rp1;
5155
5b5929cb
JN
5156 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5157 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5158
7707df4a
D
5159 return rp1;
5160}
5161
f8f2b001
D
5162static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5163{
5164 u32 val, rp1;
5165
5166 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5167
5168 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5169
5170 return rp1;
5171}
5172
03af2045 5173static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
0a073b84
JB
5174{
5175 u32 val, rp0;
5176
64936258 5177 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
0a073b84
JB
5178
5179 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5180 /* Clamp to max */
5181 rp0 = min_t(u32, rp0, 0xea);
5182
5183 return rp0;
5184}
5185
5186static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5187{
5188 u32 val, rpe;
5189
64936258 5190 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
0a073b84 5191 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
64936258 5192 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
0a073b84
JB
5193 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
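	/* The RPe fuse is split across two registers: the LO fuse supplies
	 * the low 5 bits and the HI fuse the remaining upper bits, hence the
	 * << 5 when stitching them together (inferred from the masks/shift). */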
5194
5195 return rpe;
5196}
5197
03af2045 5198static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
0a073b84 5199{
36146035
ID
5200 u32 val;
5201
5202 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5203 /*
5204 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5205 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5206 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
5207 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
5208 * to make sure it matches what Punit accepts.
5209 */
5210 return max_t(u32, val, 0xc0);
0a073b84
JB
5211}
5212
ae48434c
ID
5213/* Check that the pctx buffer wasn't moved under us. */
5214static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5215{
5216 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5217
5218 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5219 dev_priv->vlv_pctx->stolen->start);
5220}
5221
38807746
D
5222
5223/* Check that the pcbr address is not empty. */
5224static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5225{
5226 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5227
5228 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5229}
5230
5231static void cherryview_setup_pctx(struct drm_device *dev)
5232{
5233 struct drm_i915_private *dev_priv = dev->dev_private;
5234 unsigned long pctx_paddr, paddr;
5235 struct i915_gtt *gtt = &dev_priv->gtt;
5236 u32 pcbr;
5237 int pctx_size = 32*1024;
5238
38807746
D
5239 pcbr = I915_READ(VLV_PCBR);
5240 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
ce611ef8 5241 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
38807746
D
5242 paddr = (dev_priv->mm.stolen_base +
5243 (gtt->stolen_size - pctx_size));
5244
5245 pctx_paddr = (paddr & (~4095));
5246 I915_WRITE(VLV_PCBR, pctx_paddr);
5247 }
ce611ef8
VS
5248
5249 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
38807746
D
5250}
5251
c9cddffc
JB
5252static void valleyview_setup_pctx(struct drm_device *dev)
5253{
5254 struct drm_i915_private *dev_priv = dev->dev_private;
5255 struct drm_i915_gem_object *pctx;
5256 unsigned long pctx_paddr;
5257 u32 pcbr;
5258 int pctx_size = 24*1024;
5259
ee504898 5260 mutex_lock(&dev->struct_mutex);
17b0c1f7 5261
c9cddffc
JB
5262 pcbr = I915_READ(VLV_PCBR);
5263 if (pcbr) {
5264 /* BIOS set it up already, grab the pre-alloc'd space */
5265 int pcbr_offset;
5266
5267 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5268 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
5269 pcbr_offset,
190d6cd5 5270 I915_GTT_OFFSET_NONE,
c9cddffc
JB
5271 pctx_size);
5272 goto out;
5273 }
5274
ce611ef8
VS
5275 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5276
c9cddffc
JB
5277 /*
5278 * From the Gunit register HAS:
5279 * The Gfx driver is expected to program this register and ensure
5280 * proper allocation within Gfx stolen memory. For example, this
5281 * register should be programmed such that the PCBR range does not
5282 * overlap with other ranges, such as the frame buffer, protected
5283 * memory, or any other relevant ranges.
5284 */
5285 pctx = i915_gem_object_create_stolen(dev, pctx_size);
5286 if (!pctx) {
5287 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
ee504898 5288 goto out;
c9cddffc
JB
5289 }
5290
5291 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5292 I915_WRITE(VLV_PCBR, pctx_paddr);
5293
5294out:
ce611ef8 5295 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
c9cddffc 5296 dev_priv->vlv_pctx = pctx;
ee504898 5297 mutex_unlock(&dev->struct_mutex);
c9cddffc
JB
5298}
5299
ae48434c
ID
5300static void valleyview_cleanup_pctx(struct drm_device *dev)
5301{
5302 struct drm_i915_private *dev_priv = dev->dev_private;
5303
5304 if (WARN_ON(!dev_priv->vlv_pctx))
5305 return;
5306
ee504898 5307 drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
ae48434c
ID
5308 dev_priv->vlv_pctx = NULL;
5309}
5310
4e80519e
ID
5311static void valleyview_init_gt_powersave(struct drm_device *dev)
5312{
5313 struct drm_i915_private *dev_priv = dev->dev_private;
2bb25c17 5314 u32 val;
4e80519e
ID
5315
5316 valleyview_setup_pctx(dev);
5317
5318 mutex_lock(&dev_priv->rps.hw_lock);
5319
2bb25c17
VS
5320 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5321 switch ((val >> 6) & 3) {
5322 case 0:
5323 case 1:
5324 dev_priv->mem_freq = 800;
5325 break;
5326 case 2:
5327 dev_priv->mem_freq = 1066;
5328 break;
5329 case 3:
5330 dev_priv->mem_freq = 1333;
5331 break;
5332 }
80b83b62 5333 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
2bb25c17 5334
4e80519e
ID
5335 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5336 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5337 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7c59a9c1 5338 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4e80519e
ID
5339 dev_priv->rps.max_freq);
5340
5341 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5342 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7c59a9c1 5343 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4e80519e
ID
5344 dev_priv->rps.efficient_freq);
5345
f8f2b001
D
5346 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5347 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
7c59a9c1 5348 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
f8f2b001
D
5349 dev_priv->rps.rp1_freq);
5350
4e80519e
ID
5351 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5352 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7c59a9c1 5353 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4e80519e
ID
5354 dev_priv->rps.min_freq);
5355
aed242ff
CW
5356 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5357
4e80519e
ID
5358 /* Preserve min/max settings in case of re-init */
5359 if (dev_priv->rps.max_freq_softlimit == 0)
5360 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5361
5362 if (dev_priv->rps.min_freq_softlimit == 0)
5363 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5364
5365 mutex_unlock(&dev_priv->rps.hw_lock);
5366}
5367
38807746
D
5368static void cherryview_init_gt_powersave(struct drm_device *dev)
5369{
2b6b3a09 5370 struct drm_i915_private *dev_priv = dev->dev_private;
2bb25c17 5371 u32 val;
2b6b3a09 5372
38807746 5373 cherryview_setup_pctx(dev);
2b6b3a09
D
5374
5375 mutex_lock(&dev_priv->rps.hw_lock);
5376
a580516d 5377 mutex_lock(&dev_priv->sb_lock);
c6e8f39d 5378 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
a580516d 5379 mutex_unlock(&dev_priv->sb_lock);
c6e8f39d 5380
2bb25c17 5381 switch ((val >> 2) & 0x7) {
2bb25c17 5382 case 3:
2bb25c17
VS
5383 dev_priv->mem_freq = 2000;
5384 break;
bfa7df01 5385 default:
2bb25c17
VS
5386 dev_priv->mem_freq = 1600;
5387 break;
5388 }
80b83b62 5389 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
2bb25c17 5390
2b6b3a09
D
5391 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5392 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5393 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7c59a9c1 5394 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
2b6b3a09
D
5395 dev_priv->rps.max_freq);
5396
5397 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5398 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7c59a9c1 5399 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
2b6b3a09
D
5400 dev_priv->rps.efficient_freq);
5401
7707df4a
D
5402 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5403 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
7c59a9c1 5404 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
7707df4a
D
5405 dev_priv->rps.rp1_freq);
5406
5b7c91b7
D
5407 /* PUnit validated range is only [RPe, RP0] */
5408 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
2b6b3a09 5409 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7c59a9c1 5410 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2b6b3a09
D
5411 dev_priv->rps.min_freq);
5412
1c14762d
VS
5413 WARN_ONCE((dev_priv->rps.max_freq |
5414 dev_priv->rps.efficient_freq |
5415 dev_priv->rps.rp1_freq |
5416 dev_priv->rps.min_freq) & 1,
5417 "Odd GPU freq values\n");
5418
aed242ff
CW
5419 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5420
2b6b3a09
D
5421 /* Preserve min/max settings in case of re-init */
5422 if (dev_priv->rps.max_freq_softlimit == 0)
5423 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5424
5425 if (dev_priv->rps.min_freq_softlimit == 0)
5426 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5427
5428 mutex_unlock(&dev_priv->rps.hw_lock);
38807746
D
5429}
5430
4e80519e
ID
5431static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5432{
5433 valleyview_cleanup_pctx(dev);
5434}
5435
38807746
D
5436static void cherryview_enable_rps(struct drm_device *dev)
5437{
5438 struct drm_i915_private *dev_priv = dev->dev_private;
5439 struct intel_engine_cs *ring;
2b6b3a09 5440 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
38807746
D
5441 int i;
5442
5443 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5444
5445 gtfifodbg = I915_READ(GTFIFODBG);
5446 if (gtfifodbg) {
5447 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5448 gtfifodbg);
5449 I915_WRITE(GTFIFODBG, gtfifodbg);
5450 }
5451
5452 cherryview_check_pctx(dev_priv);
5453
5454 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5455 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
59bad947 5456 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
38807746 5457
160614a2
VS
5458 /* Disable RC states. */
5459 I915_WRITE(GEN6_RC_CONTROL, 0);
5460
38807746
D
5461 /* 2a: Program RC6 thresholds.*/
5462 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5463 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5464 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5465
5466 for_each_ring(ring, dev_priv, i)
5467 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5468 I915_WRITE(GEN6_RC_SLEEP, 0);
5469
f4f71c7d
D
5470 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
5471 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
38807746
D
5472
5473 /* allows RC6 residency counter to work */
5474 I915_WRITE(VLV_COUNTER_CONTROL,
5475 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5476 VLV_MEDIA_RC6_COUNT_EN |
5477 VLV_RENDER_RC6_COUNT_EN));
5478
5479 /* For now we assume BIOS is allocating and populating the PCBR */
5480 pcbr = I915_READ(VLV_PCBR);
5481
38807746
D
5482 /* 3: Enable RC6 */
5483 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5484 (pcbr >> VLV_PCBR_ADDR_SHIFT))
af5a75a3 5485 rc6_mode = GEN7_RC_CTL_TO_MODE;
38807746
D
5486
5487 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5488
2b6b3a09 5489 /* 4 Program defaults and thresholds for RPS*/
3cbdb48f 5490 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2b6b3a09
D
5491 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5492 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5493 I915_WRITE(GEN6_RP_UP_EI, 66000);
5494 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5495
5496 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5497
5498 /* 5: Enable RPS */
5499 I915_WRITE(GEN6_RP_CONTROL,
5500 GEN6_RP_MEDIA_HW_NORMAL_MODE |
eb973a5e 5501 GEN6_RP_MEDIA_IS_GFX |
2b6b3a09
D
5502 GEN6_RP_ENABLE |
5503 GEN6_RP_UP_BUSY_AVG |
5504 GEN6_RP_DOWN_IDLE_AVG);
5505
3ef62342
D
5506 /* Setting Fixed Bias */
5507 val = VLV_OVERRIDE_EN |
5508 VLV_SOC_TDP_EN |
5509 CHV_BIAS_CPU_50_SOC_50;
5510 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
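	/* The macro name suggests a fixed 50/50 CPU vs SoC power-budget bias
	 * on CHV (the VLV path below uses 12.5/87.5); this is inferred from
	 * the identifiers only, not from documentation available here. */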
5511
2b6b3a09
D
5512 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5513
8d40c3ae
VS
5514 /* RPS code assumes GPLL is used */
5515 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5516
742f491d 5517 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
2b6b3a09
D
5518 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5519
5520 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5521 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
7c59a9c1 5522 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
2b6b3a09
D
5523 dev_priv->rps.cur_freq);
5524
5525 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
7c59a9c1 5526 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
2b6b3a09
D
5527 dev_priv->rps.efficient_freq);
5528
5529 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5530
59bad947 5531 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
38807746
D
5532}
5533
0a073b84
JB
5534static void valleyview_enable_rps(struct drm_device *dev)
5535{
5536 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 5537 struct intel_engine_cs *ring;
2a5913a8 5538 u32 gtfifodbg, val, rc6_mode = 0;
0a073b84
JB
5539 int i;
5540
5541 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5542
ae48434c
ID
5543 valleyview_check_pctx(dev_priv);
5544
0a073b84 5545 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
f7d85c1e
JB
5546 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5547 gtfifodbg);
0a073b84
JB
5548 I915_WRITE(GTFIFODBG, gtfifodbg);
5549 }
5550
c8d9a590 5551 /* If VLV, Forcewake all wells, else re-direct to regular path */
59bad947 5552 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
0a073b84 5553
160614a2
VS
5554 /* Disable RC states. */
5555 I915_WRITE(GEN6_RC_CONTROL, 0);
5556
cad725fe 5557 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
0a073b84
JB
5558 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5559 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5560 I915_WRITE(GEN6_RP_UP_EI, 66000);
5561 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5562
5563 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5564
5565 I915_WRITE(GEN6_RP_CONTROL,
5566 GEN6_RP_MEDIA_TURBO |
5567 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5568 GEN6_RP_MEDIA_IS_GFX |
5569 GEN6_RP_ENABLE |
5570 GEN6_RP_UP_BUSY_AVG |
5571 GEN6_RP_DOWN_IDLE_CONT);
5572
5573 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5574 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5575 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5576
5577 for_each_ring(ring, dev_priv, i)
5578 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5579
2f0aa304 5580 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
0a073b84
JB
5581
5582 /* allows RC6 residency counter to work */
49798eb2 5583 I915_WRITE(VLV_COUNTER_CONTROL,
31685c25
D
5584 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5585 VLV_RENDER_RC0_COUNT_EN |
49798eb2
JB
5586 VLV_MEDIA_RC6_COUNT_EN |
5587 VLV_RENDER_RC6_COUNT_EN));
31685c25 5588
a2b23fe0 5589 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
6b88f295 5590 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
dc39fff7
BW
5591
5592 intel_print_rc6_info(dev, rc6_mode);
5593
a2b23fe0 5594 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
0a073b84 5595
3ef62342
D
5596 /* Setting Fixed Bias */
5597 val = VLV_OVERRIDE_EN |
5598 VLV_SOC_TDP_EN |
5599 VLV_BIAS_CPU_125_SOC_875;
5600 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5601
64936258 5602 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
0a073b84 5603
8d40c3ae
VS
5604 /* RPS code assumes GPLL is used */
5605 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5606
742f491d 5607 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
0a073b84
JB
5608 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5609
b39fb297 5610 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
73008b98 5611 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
7c59a9c1 5612 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
b39fb297 5613 dev_priv->rps.cur_freq);
0a073b84 5614
73008b98 5615 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
7c59a9c1 5616 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
b39fb297 5617 dev_priv->rps.efficient_freq);
0a073b84 5618
b39fb297 5619 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
0a073b84 5620
59bad947 5621 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
0a073b84
JB
5622}
5623
dde18883
ED
5624static unsigned long intel_pxfreq(u32 vidfreq)
5625{
5626 unsigned long freq;
5627 int div = (vidfreq & 0x3f0000) >> 16;
5628 int post = (vidfreq & 0x3000) >> 12;
5629 int pre = (vidfreq & 0x7);
5630
5631 if (!pre)
5632 return 0;
5633
5634 freq = ((div * 133333) / ((1<<post) * pre));
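	/*
	 * e.g. div = 16, post = 0, pre = 1 gives 16 * 133333 / 1 = 2133328,
	 * i.e. div steps of a (presumed) 133.33 MHz reference scaled down by
	 * the pre/post dividers; the units follow the 133333 constant.
	 */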
5635
5636 return freq;
5637}
5638
eb48eb00
DV
5639static const struct cparams {
5640 u16 i;
5641 u16 t;
5642 u16 m;
5643 u16 c;
5644} cparams[] = {
5645 { 1, 1333, 301, 28664 },
5646 { 1, 1066, 294, 24460 },
5647 { 1, 800, 294, 25192 },
5648 { 0, 1333, 276, 27605 },
5649 { 0, 1066, 276, 27605 },
5650 { 0, 800, 231, 23784 },
5651};
5652
f531dcb2 5653static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
5654{
5655 u64 total_count, diff, ret;
5656 u32 count1, count2, count3, m = 0, c = 0;
5657 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5658 int i;
5659
02d71956
DV
5660 assert_spin_locked(&mchdev_lock);
5661
20e4d407 5662 diff1 = now - dev_priv->ips.last_time1;
eb48eb00
DV
5663
5664 /* Prevent division-by-zero if we are asking too fast.
5665 * Also, we don't get interesting results if we are polling
5666 * faster than once in 10ms, so just return the saved value
5667 * in such cases.
5668 */
5669 if (diff1 <= 10)
20e4d407 5670 return dev_priv->ips.chipset_power;
eb48eb00
DV
5671
5672 count1 = I915_READ(DMIEC);
5673 count2 = I915_READ(DDREC);
5674 count3 = I915_READ(CSIEC);
5675
5676 total_count = count1 + count2 + count3;
5677
5678 /* FIXME: handle per-counter overflow */
20e4d407
DV
5679 if (total_count < dev_priv->ips.last_count1) {
5680 diff = ~0UL - dev_priv->ips.last_count1;
eb48eb00
DV
5681 diff += total_count;
5682 } else {
20e4d407 5683 diff = total_count - dev_priv->ips.last_count1;
eb48eb00
DV
5684 }
5685
5686 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
20e4d407
DV
5687 if (cparams[i].i == dev_priv->ips.c_m &&
5688 cparams[i].t == dev_priv->ips.r_t) {
eb48eb00
DV
5689 m = cparams[i].m;
5690 c = cparams[i].c;
5691 break;
5692 }
5693 }
5694
5695 diff = div_u64(diff, diff1);
5696 ret = ((m * diff) + c);
5697 ret = div_u64(ret, 10);
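	/*
	 * In other words: chipset power ~= (m * counter-delta-per-ms + c) / 10,
	 * with m and c looked up in cparams[] above via ips.c_m / ips.r_t (a
	 * restatement of the computation, not of the docs).
	 */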
5698
20e4d407
DV
5699 dev_priv->ips.last_count1 = total_count;
5700 dev_priv->ips.last_time1 = now;
eb48eb00 5701
20e4d407 5702 dev_priv->ips.chipset_power = ret;
eb48eb00
DV
5703
5704 return ret;
5705}
5706
f531dcb2
CW
5707unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5708{
3d13ef2e 5709 struct drm_device *dev = dev_priv->dev;
f531dcb2
CW
5710 unsigned long val;
5711
3d13ef2e 5712 if (INTEL_INFO(dev)->gen != 5)
f531dcb2
CW
5713 return 0;
5714
5715 spin_lock_irq(&mchdev_lock);
5716
5717 val = __i915_chipset_val(dev_priv);
5718
5719 spin_unlock_irq(&mchdev_lock);
5720
5721 return val;
5722}
5723
eb48eb00
DV
5724unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5725{
5726 unsigned long m, x, b;
5727 u32 tsfs;
5728
5729 tsfs = I915_READ(TSFS);
5730
5731 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5732 x = I915_READ8(TR1);
5733
5734 b = tsfs & TSFS_INTR_MASK;
5735
5736 return ((m * x) / 127) - b;
5737}
5738
d972d6ee
MK
5739static int _pxvid_to_vd(u8 pxvid)
5740{
5741 if (pxvid == 0)
5742 return 0;
5743
5744 if (pxvid >= 8 && pxvid < 31)
5745 pxvid = 31;
5746
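	/* Worked example: pxvid = 31 maps to (31 + 2) * 125 = 4125 below;
	 * the mobile path in pvid_to_extvid() then subtracts 1125, giving
	 * 3000. The encoding itself is otherwise undocumented here. */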
5747 return (pxvid + 2) * 125;
5748}
5749
5750static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
eb48eb00 5751{
3d13ef2e 5752 struct drm_device *dev = dev_priv->dev;
d972d6ee
MK
5753 const int vd = _pxvid_to_vd(pxvid);
5754 const int vm = vd - 1125;
5755
3d13ef2e 5756 if (INTEL_INFO(dev)->is_mobile)
d972d6ee
MK
5757 return vm > 0 ? vm : 0;
5758
5759 return vd;
eb48eb00
DV
5760}
5761
02d71956 5762static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00 5763{
5ed0bdf2 5764 u64 now, diff, diffms;
eb48eb00
DV
5765 u32 count;
5766
02d71956 5767 assert_spin_locked(&mchdev_lock);
eb48eb00 5768
5ed0bdf2
TG
5769 now = ktime_get_raw_ns();
5770 diffms = now - dev_priv->ips.last_time2;
5771 do_div(diffms, NSEC_PER_MSEC);
eb48eb00
DV
5772
5773 /* Don't divide by 0 */
eb48eb00
DV
5774 if (!diffms)
5775 return;
5776
5777 count = I915_READ(GFXEC);
5778
20e4d407
DV
5779 if (count < dev_priv->ips.last_count2) {
5780 diff = ~0UL - dev_priv->ips.last_count2;
eb48eb00
DV
5781 diff += count;
5782 } else {
20e4d407 5783 diff = count - dev_priv->ips.last_count2;
eb48eb00
DV
5784 }
5785
20e4d407
DV
5786 dev_priv->ips.last_count2 = count;
5787 dev_priv->ips.last_time2 = now;
eb48eb00
DV
5788
5789 /* More magic constants... */
5790 diff = diff * 1181;
5791 diff = div_u64(diff, diffms * 10);
20e4d407 5792 dev_priv->ips.gfx_power = diff;
eb48eb00
DV
5793}
5794
02d71956
DV
5795void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5796{
3d13ef2e
DL
5797 struct drm_device *dev = dev_priv->dev;
5798
5799 if (INTEL_INFO(dev)->gen != 5)
02d71956
DV
5800 return;
5801
9270388e 5802 spin_lock_irq(&mchdev_lock);
02d71956
DV
5803
5804 __i915_update_gfx_val(dev_priv);
5805
9270388e 5806 spin_unlock_irq(&mchdev_lock);
02d71956
DV
5807}
5808
f531dcb2 5809static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
5810{
5811 unsigned long t, corr, state1, corr2, state2;
5812 u32 pxvid, ext_v;
5813
02d71956
DV
5814 assert_spin_locked(&mchdev_lock);
5815
616847e7 5816 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
eb48eb00
DV
5817 pxvid = (pxvid >> 24) & 0x7f;
5818 ext_v = pvid_to_extvid(dev_priv, pxvid);
5819
5820 state1 = ext_v;
5821
5822 t = i915_mch_val(dev_priv);
5823
5824 /* Revel in the empirically derived constants */
5825
5826 /* Correction factor in 1/100000 units */
5827 if (t > 80)
5828 corr = ((t * 2349) + 135940);
5829 else if (t >= 50)
5830 corr = ((t * 964) + 29317);
5831 else /* < 50 */
5832 corr = ((t * 301) + 1004);
5833
5834 corr = corr * ((150142 * state1) / 10000 - 78642);
5835 corr /= 100000;
20e4d407 5836 corr2 = (corr * dev_priv->ips.corr);
eb48eb00
DV
5837
5838 state2 = (corr2 * state1) / 10000;
5839 state2 /= 100; /* convert to mW */
5840
02d71956 5841 __i915_update_gfx_val(dev_priv);
eb48eb00 5842
20e4d407 5843 return dev_priv->ips.gfx_power + state2;
eb48eb00
DV
5844}
5845
f531dcb2
CW
5846unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5847{
3d13ef2e 5848 struct drm_device *dev = dev_priv->dev;
f531dcb2
CW
5849 unsigned long val;
5850
3d13ef2e 5851 if (INTEL_INFO(dev)->gen != 5)
f531dcb2
CW
5852 return 0;
5853
5854 spin_lock_irq(&mchdev_lock);
5855
5856 val = __i915_gfx_val(dev_priv);
5857
5858 spin_unlock_irq(&mchdev_lock);
5859
5860 return val;
5861}
5862
eb48eb00
DV
5863/**
5864 * i915_read_mch_val - return value for IPS use
5865 *
5866 * Calculate and return a value for the IPS driver to use when deciding whether
5867 * we have thermal and power headroom to increase CPU or GPU power budget.
5868 */
5869unsigned long i915_read_mch_val(void)
5870{
5871 struct drm_i915_private *dev_priv;
5872 unsigned long chipset_val, graphics_val, ret = 0;
5873
9270388e 5874 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
5875 if (!i915_mch_dev)
5876 goto out_unlock;
5877 dev_priv = i915_mch_dev;
5878
f531dcb2
CW
5879 chipset_val = __i915_chipset_val(dev_priv);
5880 graphics_val = __i915_gfx_val(dev_priv);
eb48eb00
DV
5881
5882 ret = chipset_val + graphics_val;
5883
5884out_unlock:
9270388e 5885 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
5886
5887 return ret;
5888}
5889EXPORT_SYMBOL_GPL(i915_read_mch_val);
5890
5891/**
5892 * i915_gpu_raise - raise GPU frequency limit
5893 *
5894 * Raise the limit; IPS indicates we have thermal headroom.
5895 */
5896bool i915_gpu_raise(void)
5897{
5898 struct drm_i915_private *dev_priv;
5899 bool ret = true;
5900
9270388e 5901 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
5902 if (!i915_mch_dev) {
5903 ret = false;
5904 goto out_unlock;
5905 }
5906 dev_priv = i915_mch_dev;
5907
20e4d407
DV
5908 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5909 dev_priv->ips.max_delay--;
eb48eb00
DV
5910
5911out_unlock:
9270388e 5912 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
5913
5914 return ret;
5915}
5916EXPORT_SYMBOL_GPL(i915_gpu_raise);
5917
5918/**
5919 * i915_gpu_lower - lower GPU frequency limit
5920 *
5921 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5922 * frequency maximum.
5923 */
5924bool i915_gpu_lower(void)
5925{
5926 struct drm_i915_private *dev_priv;
5927 bool ret = true;
5928
9270388e 5929 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
5930 if (!i915_mch_dev) {
5931 ret = false;
5932 goto out_unlock;
5933 }
5934 dev_priv = i915_mch_dev;
5935
20e4d407
DV
5936 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5937 dev_priv->ips.max_delay++;
eb48eb00
DV
5938
5939out_unlock:
9270388e 5940 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
5941
5942 return ret;
5943}
5944EXPORT_SYMBOL_GPL(i915_gpu_lower);
5945
5946/**
5947 * i915_gpu_busy - indicate GPU busyness to IPS
5948 *
5949 * Tell the IPS driver whether or not the GPU is busy.
5950 */
5951bool i915_gpu_busy(void)
5952{
5953 struct drm_i915_private *dev_priv;
a4872ba6 5954 struct intel_engine_cs *ring;
eb48eb00 5955 bool ret = false;
f047e395 5956 int i;
eb48eb00 5957
9270388e 5958 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
5959 if (!i915_mch_dev)
5960 goto out_unlock;
5961 dev_priv = i915_mch_dev;
5962
f047e395
CW
5963 for_each_ring(ring, dev_priv, i)
5964 ret |= !list_empty(&ring->request_list);
eb48eb00
DV
5965
5966out_unlock:
9270388e 5967 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
5968
5969 return ret;
5970}
5971EXPORT_SYMBOL_GPL(i915_gpu_busy);
5972
5973/**
5974 * i915_gpu_turbo_disable - disable graphics turbo
5975 *
5976 * Disable graphics turbo by resetting the max frequency and setting the
5977 * current frequency to the default.
5978 */
5979bool i915_gpu_turbo_disable(void)
5980{
5981 struct drm_i915_private *dev_priv;
5982 bool ret = true;
5983
9270388e 5984 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
5985 if (!i915_mch_dev) {
5986 ret = false;
5987 goto out_unlock;
5988 }
5989 dev_priv = i915_mch_dev;
5990
20e4d407 5991 dev_priv->ips.max_delay = dev_priv->ips.fstart;
eb48eb00 5992
20e4d407 5993 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
eb48eb00
DV
5994 ret = false;
5995
5996out_unlock:
9270388e 5997 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
5998
5999 return ret;
6000}
6001EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6002
6003/**
6004 * Tells the intel_ips driver that the i915 driver is now loaded, if
6005 * IPS got loaded first.
6006 *
6007 * This awkward dance is so that neither module has to depend on the
6008 * other in order for IPS to do the appropriate communication of
6009 * GPU turbo limits to i915.
6010 */
6011static void
6012ips_ping_for_i915_load(void)
6013{
6014 void (*link)(void);
6015
6016 link = symbol_get(ips_link_to_i915_driver);
6017 if (link) {
6018 link();
6019 symbol_put(ips_link_to_i915_driver);
6020 }
6021}
6022
6023void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6024{
02d71956
DV
6025 /* We only register the i915 ips part with intel-ips once everything is
6026 * set up, to avoid intel-ips sneaking in and reading bogus values. */
9270388e 6027 spin_lock_irq(&mchdev_lock);
eb48eb00 6028 i915_mch_dev = dev_priv;
9270388e 6029 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6030
6031 ips_ping_for_i915_load();
6032}
6033
6034void intel_gpu_ips_teardown(void)
6035{
9270388e 6036 spin_lock_irq(&mchdev_lock);
eb48eb00 6037 i915_mch_dev = NULL;
9270388e 6038 spin_unlock_irq(&mchdev_lock);
eb48eb00 6039}
76c3552f 6040
8090c6b9 6041static void intel_init_emon(struct drm_device *dev)
dde18883
ED
6042{
6043 struct drm_i915_private *dev_priv = dev->dev_private;
6044 u32 lcfuse;
6045 u8 pxw[16];
6046 int i;
6047
6048 /* Disable to program */
6049 I915_WRITE(ECR, 0);
6050 POSTING_READ(ECR);
6051
6052 /* Program energy weights for various events */
6053 I915_WRITE(SDEW, 0x15040d00);
6054 I915_WRITE(CSIEW0, 0x007f0000);
6055 I915_WRITE(CSIEW1, 0x1e220004);
6056 I915_WRITE(CSIEW2, 0x04000004);
6057
6058 for (i = 0; i < 5; i++)
616847e7 6059 I915_WRITE(PEW(i), 0);
dde18883 6060 for (i = 0; i < 3; i++)
616847e7 6061 I915_WRITE(DEW(i), 0);
dde18883
ED
6062
6063 /* Program P-state weights to account for frequency power adjustment */
6064 for (i = 0; i < 16; i++) {
616847e7 6065 u32 pxvidfreq = I915_READ(PXVFREQ(i));
dde18883
ED
6066 unsigned long freq = intel_pxfreq(pxvidfreq);
6067 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6068 PXVFREQ_PX_SHIFT;
6069 unsigned long val;
6070
6071 val = vid * vid;
6072 val *= (freq / 1000);
6073 val *= 255;
6074 val /= (127*127*900);
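		/*
		 * The weight scales as vid^2 * freq, presumably because
		 * dynamic power scales roughly as V^2 * f, normalized into a
		 * byte against the 127 * 127 * 900 reference; a reading of
		 * the arithmetic above, since the constants themselves are
		 * not documented here.
		 */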
6075 if (val > 0xff)
6076 DRM_ERROR("bad pxval: %ld\n", val);
6077 pxw[i] = val;
6078 }
6079 /* Render standby states get 0 weight */
6080 pxw[14] = 0;
6081 pxw[15] = 0;
6082
6083 for (i = 0; i < 4; i++) {
6084 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6085 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
616847e7 6086 I915_WRITE(PXW(i), val);
dde18883
ED
6087 }
6088
6089 /* Adjust magic regs to magic values (more experimental results) */
6090 I915_WRITE(OGW0, 0);
6091 I915_WRITE(OGW1, 0);
6092 I915_WRITE(EG0, 0x00007f00);
6093 I915_WRITE(EG1, 0x0000000e);
6094 I915_WRITE(EG2, 0x000e0000);
6095 I915_WRITE(EG3, 0x68000300);
6096 I915_WRITE(EG4, 0x42000000);
6097 I915_WRITE(EG5, 0x00140031);
6098 I915_WRITE(EG6, 0);
6099 I915_WRITE(EG7, 0);
6100
6101 for (i = 0; i < 8; i++)
616847e7 6102 I915_WRITE(PXWL(i), 0);
dde18883
ED
6103
6104 /* Enable PMON + select events */
6105 I915_WRITE(ECR, 0x80000019);
6106
6107 lcfuse = I915_READ(LCFUSE02);
6108
20e4d407 6109 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
dde18883
ED
6110}
6111
ae48434c
ID
6112void intel_init_gt_powersave(struct drm_device *dev)
6113{
b268c699
ID
6114 struct drm_i915_private *dev_priv = dev->dev_private;
6115
b268c699
ID
6116 /*
6117 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
6118 * requirement.
6119 */
6120 if (!i915.enable_rc6) {
6121 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6122 intel_runtime_pm_get(dev_priv);
6123 }
e6069ca8 6124
38807746
D
6125 if (IS_CHERRYVIEW(dev))
6126 cherryview_init_gt_powersave(dev);
6127 else if (IS_VALLEYVIEW(dev))
4e80519e 6128 valleyview_init_gt_powersave(dev);
ae48434c
ID
6129}
6130
6131void intel_cleanup_gt_powersave(struct drm_device *dev)
6132{
b268c699
ID
6133 struct drm_i915_private *dev_priv = dev->dev_private;
6134
38807746
D
6135 if (IS_CHERRYVIEW(dev))
6136 return;
6137 else if (IS_VALLEYVIEW(dev))
4e80519e 6138 valleyview_cleanup_gt_powersave(dev);
b268c699
ID
6139
6140 if (!i915.enable_rc6)
6141 intel_runtime_pm_put(dev_priv);
ae48434c
ID
6142}
6143
dbea3cea
ID
6144static void gen6_suspend_rps(struct drm_device *dev)
6145{
6146 struct drm_i915_private *dev_priv = dev->dev_private;
6147
6148 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6149
4c2a8897 6150 gen6_disable_rps_interrupts(dev);
dbea3cea
ID
6151}
6152
156c7ca0
JB
6153/**
6154 * intel_suspend_gt_powersave - suspend PM work and helper threads
6155 * @dev: drm device
6156 *
6157 * We don't want to disable RC6 or other features here, we just want
6158 * to make sure any work we've queued has finished and won't bother
6159 * us while we're suspended.
6160 */
6161void intel_suspend_gt_powersave(struct drm_device *dev)
6162{
6163 struct drm_i915_private *dev_priv = dev->dev_private;
6164
d4d70aa5
ID
6165 if (INTEL_INFO(dev)->gen < 6)
6166 return;
6167
dbea3cea 6168 gen6_suspend_rps(dev);
b47adc17
D
6169
6170 /* Force GPU to min freq during suspend */
6171 gen6_rps_idle(dev_priv);
156c7ca0
JB
6172}
6173
8090c6b9
DV
6174void intel_disable_gt_powersave(struct drm_device *dev)
6175{
1a01ab3b
JB
6176 struct drm_i915_private *dev_priv = dev->dev_private;
6177
930ebb46 6178 if (IS_IRONLAKE_M(dev)) {
8090c6b9 6179 ironlake_disable_drps(dev);
38807746 6180 } else if (INTEL_INFO(dev)->gen >= 6) {
10d8d366 6181 intel_suspend_gt_powersave(dev);
e494837a 6182
4fc688ce 6183 mutex_lock(&dev_priv->rps.hw_lock);
20e49366
ZW
6184 if (INTEL_INFO(dev)->gen >= 9)
6185 gen9_disable_rps(dev);
6186 else if (IS_CHERRYVIEW(dev))
38807746
D
6187 cherryview_disable_rps(dev);
6188 else if (IS_VALLEYVIEW(dev))
d20d4f0c
JB
6189 valleyview_disable_rps(dev);
6190 else
6191 gen6_disable_rps(dev);
e534770a 6192
c0951f0c 6193 dev_priv->rps.enabled = false;
4fc688ce 6194 mutex_unlock(&dev_priv->rps.hw_lock);
930ebb46 6195 }
8090c6b9
DV
6196}
6197
1a01ab3b
JB
6198static void intel_gen6_powersave_work(struct work_struct *work)
6199{
6200 struct drm_i915_private *dev_priv =
6201 container_of(work, struct drm_i915_private,
6202 rps.delayed_resume_work.work);
6203 struct drm_device *dev = dev_priv->dev;
6204
4fc688ce 6205 mutex_lock(&dev_priv->rps.hw_lock);
0a073b84 6206
4c2a8897 6207 gen6_reset_rps_interrupts(dev);
3cc134e3 6208
38807746
D
6209 if (IS_CHERRYVIEW(dev)) {
6210 cherryview_enable_rps(dev);
6211 } else if (IS_VALLEYVIEW(dev)) {
0a073b84 6212 valleyview_enable_rps(dev);
20e49366 6213 } else if (INTEL_INFO(dev)->gen >= 9) {
b6fef0ef 6214 gen9_enable_rc6(dev);
20e49366 6215 gen9_enable_rps(dev);
ef11bdb3 6216 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
cc017fb4 6217 __gen6_update_ring_freq(dev);
6edee7f3
BW
6218 } else if (IS_BROADWELL(dev)) {
6219 gen8_enable_rps(dev);
c2bc2fc5 6220 __gen6_update_ring_freq(dev);
0a073b84
JB
6221 } else {
6222 gen6_enable_rps(dev);
c2bc2fc5 6223 __gen6_update_ring_freq(dev);
0a073b84 6224 }
aed242ff
CW
6225
6226 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6227 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6228
6229 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6230 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6231
c0951f0c 6232 dev_priv->rps.enabled = true;
3cc134e3 6233
4c2a8897 6234 gen6_enable_rps_interrupts(dev);
3cc134e3 6235
4fc688ce 6236 mutex_unlock(&dev_priv->rps.hw_lock);
c6df39b5
ID
6237
6238 intel_runtime_pm_put(dev_priv);
1a01ab3b
JB
6239}
6240
8090c6b9
DV
6241void intel_enable_gt_powersave(struct drm_device *dev)
6242{
1a01ab3b
JB
6243 struct drm_i915_private *dev_priv = dev->dev_private;
6244
f61018b1
YZ
6245 /* Powersaving is controlled by the host when inside a VM */
6246 if (intel_vgpu_active(dev))
6247 return;
6248
8090c6b9
DV
6249 if (IS_IRONLAKE_M(dev)) {
6250 ironlake_enable_drps(dev);
84f1b20f 6251 mutex_lock(&dev->struct_mutex);
8090c6b9 6252 intel_init_emon(dev);
dc1d0136 6253 mutex_unlock(&dev->struct_mutex);
38807746 6254 } else if (INTEL_INFO(dev)->gen >= 6) {
1a01ab3b
JB
6255 /*
6256 * PCU communication is slow and this doesn't need to be
6257 * done at any specific time, so do this out of our fast path
6258 * to make resume and init faster.
c6df39b5
ID
6259 *
6260 * We depend on the HW RC6 power context save/restore
6261 * mechanism when entering D3 through runtime PM suspend. So
6262 * disable RPM until RPS/RC6 is properly set up. We can only
6263 * get here via the driver load/system resume/runtime resume
6264 * paths, so the _noresume version is enough (and in case of
6265 * runtime resume it's necessary).
1a01ab3b 6266 */
c6df39b5
ID
6267 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
6268 round_jiffies_up_relative(HZ)))
6269 intel_runtime_pm_get_noresume(dev_priv);
8090c6b9
DV
6270 }
6271}
6272
c6df39b5
ID
6273void intel_reset_gt_powersave(struct drm_device *dev)
6274{
6275 struct drm_i915_private *dev_priv = dev->dev_private;
6276
dbea3cea
ID
6277 if (INTEL_INFO(dev)->gen < 6)
6278 return;
6279
6280 gen6_suspend_rps(dev);
c6df39b5 6281 dev_priv->rps.enabled = false;
c6df39b5
ID
6282}
6283
3107bd48
DV
6284static void ibx_init_clock_gating(struct drm_device *dev)
6285{
6286 struct drm_i915_private *dev_priv = dev->dev_private;
6287
6288 /*
6289 * On Ibex Peak and Cougar Point, we need to disable clock
6290 * gating for the panel power sequencer or it will fail to
6291 * start up when no ports are active.
6292 */
6293 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6294}
6295
0e088b8f
VS
6296static void g4x_disable_trickle_feed(struct drm_device *dev)
6297{
6298 struct drm_i915_private *dev_priv = dev->dev_private;
b12ce1d8 6299 enum pipe pipe;
0e088b8f 6300
055e393f 6301 for_each_pipe(dev_priv, pipe) {
0e088b8f
VS
6302 I915_WRITE(DSPCNTR(pipe),
6303 I915_READ(DSPCNTR(pipe)) |
6304 DISPPLANE_TRICKLE_FEED_DISABLE);
b12ce1d8
VS
6305
6306 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6307 POSTING_READ(DSPSURF(pipe));
0e088b8f
VS
6308 }
6309}
6310
017636cc
VS
6311static void ilk_init_lp_watermarks(struct drm_device *dev)
6312{
6313 struct drm_i915_private *dev_priv = dev->dev_private;
6314
6315 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6316 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6317 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6318
6319 /*
6320 * Don't touch WM1S_LP_EN here.
6321 * Doing so could cause underruns.
6322 */
6323}
6324
1fa61106 6325static void ironlake_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
6326{
6327 struct drm_i915_private *dev_priv = dev->dev_private;
231e54f6 6328 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6f1d69b0 6329
f1e8fa56
DL
6330 /*
6331 * Required for FBC
6332 * WaFbcDisableDpfcClockGating:ilk
6333 */
4d47e4f5
DL
6334 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6335 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6336 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6f1d69b0
ED
6337
6338 I915_WRITE(PCH_3DCGDIS0,
6339 MARIUNIT_CLOCK_GATE_DISABLE |
6340 SVSMUNIT_CLOCK_GATE_DISABLE);
6341 I915_WRITE(PCH_3DCGDIS1,
6342 VFMUNIT_CLOCK_GATE_DISABLE);
6343
6f1d69b0
ED
6344 /*
6345 * According to the spec the following bits should be set in
6346 * order to enable memory self-refresh
6347 * The bit 22/21 of 0x42004
6348 * The bit 5 of 0x42020
6349 * The bit 15 of 0x45000
6350 */
6351 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6352 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6353 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4d47e4f5 6354 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6f1d69b0
ED
6355 I915_WRITE(DISP_ARB_CTL,
6356 (I915_READ(DISP_ARB_CTL) |
6357 DISP_FBC_WM_DIS));
017636cc
VS
6358
6359 ilk_init_lp_watermarks(dev);
6f1d69b0
ED
6360
6361 /*
6362 * Based on the document from hardware guys the following bits
6363 * should be set unconditionally in order to enable FBC.
6364 * The bit 22 of 0x42000
6365 * The bit 22 of 0x42004
6366 * The bit 7,8,9 of 0x42020.
6367 */
6368 if (IS_IRONLAKE_M(dev)) {
4bb35334 6369 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
6f1d69b0
ED
6370 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6371 I915_READ(ILK_DISPLAY_CHICKEN1) |
6372 ILK_FBCQ_DIS);
6373 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6374 I915_READ(ILK_DISPLAY_CHICKEN2) |
6375 ILK_DPARB_GATE);
6f1d69b0
ED
6376 }
6377
4d47e4f5
DL
6378 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6379
6f1d69b0
ED
6380 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6381 I915_READ(ILK_DISPLAY_CHICKEN2) |
6382 ILK_ELPIN_409_SELECT);
6383 I915_WRITE(_3D_CHICKEN2,
6384 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6385 _3D_CHICKEN2_WM_READ_PIPELINED);
4358a374 6386
ecdb4eb7 6387 /* WaDisableRenderCachePipelinedFlush:ilk */
4358a374
DV
6388 I915_WRITE(CACHE_MODE_0,
6389 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3107bd48 6390
4e04632e
AG
6391 /* WaDisable_RenderCache_OperationalFlush:ilk */
6392 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6393
0e088b8f 6394 g4x_disable_trickle_feed(dev);
bdad2b2f 6395
3107bd48
DV
6396 ibx_init_clock_gating(dev);
6397}
6398
6399static void cpt_init_clock_gating(struct drm_device *dev)
6400{
6401 struct drm_i915_private *dev_priv = dev->dev_private;
6402 int pipe;
3f704fa2 6403 uint32_t val;
3107bd48
DV
6404
6405 /*
6406 * On Ibex Peak and Cougar Point, we need to disable clock
6407 * gating for the panel power sequencer or it will fail to
6408 * start up when no ports are active.
6409 */
cd664078
JB
6410 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6411 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6412 PCH_CPUNIT_CLOCK_GATE_DISABLE);
3107bd48
DV
6413 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6414 DPLS_EDP_PPS_FIX_DIS);
335c07b7
TI
6415 /* The below fixes the weird display corruption, a few pixels shifted
6416 * downward, on (only) LVDS of some HP laptops with IVY.
6417 */
055e393f 6418 for_each_pipe(dev_priv, pipe) {
dc4bd2d1
PZ
6419 val = I915_READ(TRANS_CHICKEN2(pipe));
6420 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6421 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
41aa3448 6422 if (dev_priv->vbt.fdi_rx_polarity_inverted)
3f704fa2 6423 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
dc4bd2d1
PZ
6424 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
6425 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6426 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
3f704fa2
PZ
6427 I915_WRITE(TRANS_CHICKEN2(pipe), val);
6428 }
3107bd48 6429 /* WADP0ClockGatingDisable */
055e393f 6430 for_each_pipe(dev_priv, pipe) {
3107bd48
DV
6431 I915_WRITE(TRANS_CHICKEN1(pipe),
6432 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6433 }
6f1d69b0
ED
6434}
6435
1d7aaa0c
DV
6436static void gen6_check_mch_setup(struct drm_device *dev)
6437{
6438 struct drm_i915_private *dev_priv = dev->dev_private;
6439 uint32_t tmp;
6440
6441 tmp = I915_READ(MCH_SSKPD);
df662a28
DV
6442 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6443 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
6444 tmp);
1d7aaa0c
DV
6445}
6446
1fa61106 6447static void gen6_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
6448{
6449 struct drm_i915_private *dev_priv = dev->dev_private;
231e54f6 6450 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6f1d69b0 6451
231e54f6 6452 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6f1d69b0
ED
6453
6454 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6455 I915_READ(ILK_DISPLAY_CHICKEN2) |
6456 ILK_ELPIN_409_SELECT);
6457
ecdb4eb7 6458 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4283908e
DV
6459 I915_WRITE(_3D_CHICKEN,
6460 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6461
4e04632e
AG
6462 /* WaDisable_RenderCache_OperationalFlush:snb */
6463 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6464
8d85d272
VS
6465 /*
6466 * BSpec recommends 8x4 when MSAA is used,
6467 * however in practice 16x4 seems fastest.
c5c98a58
VS
6468 *
6469 * Note that PS/WM thread counts depend on the WIZ hashing
6470 * disable bit, which we don't touch here, but it's good
6471 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
8d85d272
VS
6472 */
6473 I915_WRITE(GEN6_GT_MODE,
98533251 6474 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
8d85d272 6475
017636cc 6476 ilk_init_lp_watermarks(dev);
6f1d69b0 6477
6f1d69b0 6478 I915_WRITE(CACHE_MODE_0,
50743298 6479 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6f1d69b0
ED
6480
6481 I915_WRITE(GEN6_UCGCTL1,
6482 I915_READ(GEN6_UCGCTL1) |
6483 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6484 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6485
6486 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
6487 * gating disable must be set. Failure to set it results in
6488 * flickering pixels due to Z write ordering failures after
6489 * some amount of runtime in the Mesa "fire" demo, and Unigine
6490 * Sanctuary and Tropics, and apparently anything else with
6491 * alpha test or pixel discard.
6492 *
6493 * According to the spec, bit 11 (RCCUNIT) must also be set,
6494 * but we didn't debug actual testcases to find it out.
0f846f81 6495 *
ef59318c
VS
6496 * WaDisableRCCUnitClockGating:snb
6497 * WaDisableRCPBUnitClockGating:snb
6f1d69b0
ED
6498 */
6499 I915_WRITE(GEN6_UCGCTL2,
6500 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6501 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6502
5eb146dd 6503 /* WaStripsFansDisableFastClipPerformanceFix:snb */
743b57d8
VS
6504 I915_WRITE(_3D_CHICKEN3,
6505 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
6f1d69b0 6506
e927ecde
VS
6507 /*
6508 * Bspec says:
6509 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
6510 * 3DSTATE_SF number of SF output attributes is more than 16."
6511 */
6512 I915_WRITE(_3D_CHICKEN3,
6513 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
6514
6f1d69b0
ED
6515 /*
6516 * According to the spec the following bits should be
6517 * set in order to enable memory self-refresh and fbc:
6518 * The bit21 and bit22 of 0x42000
6519 * The bit21 and bit22 of 0x42004
6520 * The bit5 and bit7 of 0x42020
6521 * The bit14 of 0x70180
6522 * The bit14 of 0x71180
4bb35334
DL
6523 *
6524 * WaFbcAsynchFlipDisableFbcQueue:snb
6f1d69b0
ED
6525 */
6526 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6527 I915_READ(ILK_DISPLAY_CHICKEN1) |
6528 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6529 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6530 I915_READ(ILK_DISPLAY_CHICKEN2) |
6531 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
231e54f6
DL
6532 I915_WRITE(ILK_DSPCLK_GATE_D,
6533 I915_READ(ILK_DSPCLK_GATE_D) |
6534 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
6535 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6f1d69b0 6536
0e088b8f 6537 g4x_disable_trickle_feed(dev);
f8f2ac9a 6538
3107bd48 6539 cpt_init_clock_gating(dev);
1d7aaa0c
DV
6540
6541 gen6_check_mch_setup(dev);
6f1d69b0
ED
6542}
6543
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

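/*
 * Editor's note: presumably the inverse of the LP partition level disable
 * applied in lpt_init_clock_gating() above -- clear the bit again before
 * suspend so the PCH power partitions are allowed to gate while the
 * machine is down.
 */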
static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_LPT_LP(dev)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

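/*
 * Editor's note on the GEN7_MISCCPCTL sequence inside this function: the
 * L3 SQC register can presumably only be reprogrammed safely while DOP
 * clock gating is off (WaTempDisableDOPClkGating), so the gating enable
 * is saved, cleared around the GEN8_L3SQCREG1 write, and then restored.
 */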
static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	uint32_t misccpctl;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:bdw
	 * WaTempDisableDOPClkGating:bdw
	 */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

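/*
 * Editor's note: the per-platform hook invoked below is installed by
 * intel_init_pm() further down in this file; platforms that need no clock
 * gating setup simply leave it NULL. A caller-side sketch (hypothetical
 * driver-load path, not code from this file):
 *
 *	intel_init_pm(dev);		// picks e.g. gen6_init_clock_gating
 *	intel_init_clock_gating(dev);	// dispatches through the vfunc
 */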
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.init_clock_gating)
		dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		if (IS_BROXTON(dev))
			dev_priv->display.init_clock_gating =
				bxt_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.program_watermarks = ilk_program_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		vlv_setup_wm_latency(dev);

		dev_priv->display.update_wm = vlv_update_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_setup_wm_latency(dev);

		dev_priv->display.update_wm = vlv_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

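/*
 * Editor's note on the GEN6+ "pcode" mailbox helpers below: the handshake
 * is to check that GEN6_PCODE_READY is clear, write the data register(s),
 * write the mailbox register with GEN6_PCODE_READY plus the command, then
 * poll until the hardware clears GEN6_PCODE_READY again (here with a
 * 500 ms timeout). Callers must hold rps.hw_lock. A hypothetical caller
 * sketch (the mailbox command name is made up for illustration, not taken
 * from this file):
 *
 *	u32 val = 0;
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_read(dev_priv, SOME_PCODE_CMD, &val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *	if (ret)
 *		DRM_DEBUG_DRIVER("pcode read failed: %d\n", ret);
 */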
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

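/*
 * Editor's note: the VLV/CHV GPU frequency opcodes are encoded relative to
 * the CZ (render) clock; vlv_gpu_freq_div() picks the divider matching the
 * CZ clock in MHz. A worked example for the Baytrail helpers below,
 * assuming a 320 MHz CZ clock (divider 16): byt_gpu_freq(0xbd) =
 * DIV_ROUND_CLOSEST(320 * (0xbd + 6 - 0xbd), 16) = 120 MHz, and
 * byt_freq_opcode(120) = DIV_ROUND_CLOSEST(16 * 120, 320) + 0xbd - 6 =
 * 0xbd again, so the two conversions round-trip.
 */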
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}

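/*
 * Editor's note: the Cherryview variants below use half the VLV divider
 * and round the result so that only even opcodes are produced (see the
 * "CHV needs even values" note in chv_freq_opcode()). A worked example,
 * assuming a 400 MHz CZ clock (vlv_gpu_freq_div() returns 20, so
 * div/mul = 10): chv_gpu_freq(30) = DIV_ROUND_CLOSEST(400 * 30, 2 * 10) / 2
 * = 300 MHz, and chv_freq_opcode(300) =
 * DIV_ROUND_CLOSEST(300 * 2 * 10, 400) * 2 = 30.
 */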
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;
	div /= 2;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;
	mul /= 2;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}

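/*
 * Editor's note: intel_gpu_freq()/intel_freq_opcode() translate between
 * the RPS opcode values used by the hardware and MHz. On VLV/CHV the
 * CZ-clock based helpers above are used; elsewhere the opcode is simply a
 * multiple of GT_FREQUENCY_MULTIPLIER MHz (nominally 50 MHz per step),
 * with gen9 adding GEN9_FREQ_SCALER (nominally 3) for its finer
 * 50/3 MHz granularity.
 */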
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

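/*
 * Editor's note: asynchronous RPS boost on behalf of a request. Since the
 * boost may be wanted from contexts that cannot take the required locks,
 * the request is referenced and handed to a small work item; the worker
 * only boosts the GPU frequency if the request is still outstanding, then
 * drops its reference and frees the bookkeeping structure.
 */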
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req, true))
		gen6_rps_boost(to_i915(req->ring->dev), NULL,
			       req->emitted_jiffies);

	i915_gem_request_unreference__unlocked(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_device *dev,
				       struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_INFO(dev)->gen < 6)
		return;

	if (i915_gem_request_completed(req, true))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(to_i915(dev)->wq, &boost->work);
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
	INIT_LIST_HEAD(&dev_priv->rps.clients);
	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
	atomic_set(&dev_priv->pm.atomic_seq, 0);
}